wisp.place

Monorepo for wisp.place, a static site hosting service built on top of the AT Protocol.
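
Example usage of the CLI defined below (a sketch: the `wisp` binary name is an assumption and may differ in your build):

    wisp deploy alice.bsky.social --path ./dist --site my-site
    wisp deploy alice.bsky.social --path ./dist --password <app-password>
    wisp pull alice.bsky.social --site my-site --output ./my-site
    wisp serve alice.bsky.social --site my-site --output ./my-site --port 8080
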
mod builder_types;
mod place_wisp;
mod cid;
mod blob_map;
mod metadata;
mod download;
mod pull;
mod serve;
mod subfs_utils;
mod redirects;

use clap::{Parser, Subcommand};
use jacquard::CowStr;
use jacquard::client::{Agent, FileAuthStore, AgentSessionExt, MemoryCredentialSession, AgentSession};
use jacquard::oauth::client::OAuthClient;
use jacquard::oauth::loopback::LoopbackConfig;
use jacquard::prelude::IdentityResolver;
use jacquard_common::types::string::{Datetime, Rkey, RecordKey};
use jacquard_common::types::blob::MimeType;
use miette::IntoDiagnostic;
use std::path::{Path, PathBuf};
use std::collections::HashMap;
use flate2::Compression;
use flate2::write::GzEncoder;
use std::io::Write;
use base64::Engine;
use futures::stream::{self, StreamExt};

use place_wisp::fs::*;

#[derive(Parser, Debug)]
#[command(author, version, about = "wisp.place CLI tool")]
struct Args {
    #[command(subcommand)]
    command: Option<Commands>,

    // Deploy arguments (when no subcommand is specified)
    /// Handle (e.g., alice.bsky.social), DID, or PDS URL
    #[arg(global = true, conflicts_with = "command")]
    input: Option<CowStr<'static>>,

    /// Path to the directory containing your static site
    #[arg(short, long, global = true, conflicts_with = "command")]
    path: Option<PathBuf>,

    /// Site name (defaults to directory name)
    #[arg(short, long, global = true, conflicts_with = "command")]
    site: Option<String>,

    /// Path to auth store file
    #[arg(long, global = true, conflicts_with = "command")]
    store: Option<String>,

    /// App Password for authentication
    #[arg(long, global = true, conflicts_with = "command")]
    password: Option<CowStr<'static>>,
}

#[derive(Subcommand, Debug)]
enum Commands {
    /// Deploy a static site to wisp.place (default command)
    Deploy {
        /// Handle (e.g., alice.bsky.social), DID, or PDS URL
        input: CowStr<'static>,

        /// Path to the directory containing your static site
        #[arg(short, long, default_value = ".")]
        path: PathBuf,

        /// Site name (defaults to directory name)
        #[arg(short, long)]
        site: Option<String>,

        /// Path to auth store file (will be created if missing, only used with OAuth)
        #[arg(long, default_value = "/tmp/wisp-oauth-session.json")]
        store: String,

        /// App Password for authentication (alternative to OAuth)
        #[arg(long)]
        password: Option<CowStr<'static>>,
    },
    /// Pull a site from the PDS to a local directory
    Pull {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the downloaded site
        #[arg(short, long, default_value = ".")]
        output: PathBuf,
    },
    /// Serve a site locally with real-time firehose updates
    Serve {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the site files
        #[arg(short, long, default_value = ".")]
        output: PathBuf,

        /// Port to serve on
        #[arg(short, long, default_value = "8080")]
        port: u16,
    },
}

#[tokio::main]
async fn main() -> miette::Result<()> {
    let args = Args::parse();

    let result = match args.command {
        Some(Commands::Deploy { input, path, site, store, password }) => {
            // Dispatch to appropriate authentication method
            if let Some(password) = password {
                run_with_app_password(input, password, path, site).await
            } else {
                run_with_oauth(input, store, path, site).await
            }
        }
        Some(Commands::Pull { input, site, output }) => {
            pull::pull_site(input, CowStr::from(site), output).await
        }
        Some(Commands::Serve { input, site, output, port }) => {
            serve::serve_site(input, CowStr::from(site), output, port).await
        }
        None => {
            // Legacy mode: if input is provided, assume deploy command
            if let Some(input) = args.input {
                let path = args.path.unwrap_or_else(|| PathBuf::from("."));
                let store = args.store.unwrap_or_else(|| "/tmp/wisp-oauth-session.json".to_string());

                // Dispatch to appropriate authentication method
                if let Some(password) = args.password {
                    run_with_app_password(input, password, path, args.site).await
                } else {
                    run_with_oauth(input, store, path, args.site).await
                }
            } else {
                // No command and no input, show help
                use clap::CommandFactory;
                Args::command().print_help().into_diagnostic()?;
                Ok(())
            }
        }
    };

    // Force exit to avoid hanging on background tasks/connections
    match result {
        Ok(_) => std::process::exit(0),
        Err(e) => {
            eprintln!("{:?}", e);
            std::process::exit(1)
        }
    }
}

/// Run deployment with app password authentication
async fn run_with_app_password(
    input: CowStr<'static>,
    password: CowStr<'static>,
    path: PathBuf,
    site: Option<String>,
) -> miette::Result<()> {
    let (session, auth) =
        MemoryCredentialSession::authenticated(input, password, None, None).await?;
    println!("Signed in as {}", auth.handle);

    let agent: Agent<_> = Agent::from(session);
    deploy_site(&agent, path, site).await
}

/// Run deployment with OAuth authentication
async fn run_with_oauth(
    input: CowStr<'static>,
    store: String,
    path: PathBuf,
    site: Option<String>,
) -> miette::Result<()> {
    use jacquard::oauth::scopes::Scope;
    use jacquard::oauth::atproto::AtprotoClientMetadata;
    use jacquard::oauth::session::ClientData;
    use url::Url;

    // Request the necessary scopes for wisp.place
    let scopes = Scope::parse_multiple("atproto repo:place.wisp.fs repo:place.wisp.subfs blob:*/*")
        .map_err(|e| miette::miette!("Failed to parse scopes: {:?}", e))?;

    // Create redirect URIs that match the loopback server (port 4000, path /oauth/callback)
    let redirect_uris = vec![
        Url::parse("http://127.0.0.1:4000/oauth/callback").into_diagnostic()?,
        Url::parse("http://[::1]:4000/oauth/callback").into_diagnostic()?,
    ];

    // Create client metadata with matching redirect URIs and scopes
    let client_data = ClientData {
        keyset: None,
        config: AtprotoClientMetadata::new_localhost(
            Some(redirect_uris),
            Some(scopes),
        ),
    };

    let oauth = OAuthClient::new(FileAuthStore::new(&store), client_data);

    let session = oauth
        .login_with_local_server(input, Default::default(), LoopbackConfig::default())
        .await?;

    let agent: Agent<_> = Agent::from(session);
    deploy_site(&agent, path, site).await
}

/// Deploy the site using the provided agent
async fn deploy_site(
    agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
    path: PathBuf,
    site: Option<String>,
) -> miette::Result<()> {
    // Verify the path exists
    if !path.exists() {
        return Err(miette::miette!("Path does not exist: {}", path.display()));
    }

    // Get site name
    let site_name = site.unwrap_or_else(|| {
        path
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or("site")
            .to_string()
    });

    println!("Deploying site '{}'...", site_name);

    // Try to fetch existing manifest for incremental updates
    let (existing_blob_map, old_subfs_uris): (HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>, Vec<(String, String)>) = {
        use jacquard_common::types::string::AtUri;

        // Get the DID for this session
        let session_info = agent.session_info().await;
        if let Some((did, _)) = session_info {
            // Construct the AT URI for the record
            let uri_string = format!("at://{}/place.wisp.fs/{}", did, site_name);
            if let Ok(uri) = AtUri::new(&uri_string) {
                match agent.get_record::<Fs>(&uri).await {
                    Ok(response) => {
                        match response.into_output() {
                            Ok(record_output) => {
                                let existing_manifest = record_output.value;
                                let mut blob_map = blob_map::extract_blob_map(&existing_manifest.root);
                                println!("Found existing manifest with {} files in main record", blob_map.len());

                                // Extract subfs URIs from main record
                                let subfs_uris = subfs_utils::extract_subfs_uris(&existing_manifest.root, String::new());

                                if !subfs_uris.is_empty() {
                                    println!("Found {} subfs records, fetching for blob reuse...", subfs_uris.len());

                                    // Merge blob maps from all subfs records
                                    match subfs_utils::merge_subfs_blob_maps(agent, subfs_uris.clone(), &mut blob_map).await {
                                        Ok(merged_count) => {
                                            println!("Total blob map: {} files (main + {} from subfs)", blob_map.len(), merged_count);
                                        }
                                        Err(e) => {
                                            eprintln!("⚠️ Failed to merge some subfs blob maps: {}", e);
                                        }
                                    }

                                    (blob_map, subfs_uris)
                                } else {
                                    (blob_map, Vec::new())
                                }
                            }
                            Err(_) => {
                                println!("No existing manifest found, uploading all files...");
                                (HashMap::new(), Vec::new())
                            }
                        }
                    }
                    Err(_) => {
                        // Record doesn't exist yet - this is a new site
                        println!("No existing manifest found, uploading all files...");
                        (HashMap::new(), Vec::new())
                    }
                }
            } else {
                println!("No existing manifest found (invalid URI), uploading all files...");
                (HashMap::new(), Vec::new())
            }
        } else {
            println!("No existing manifest found (could not get DID), uploading all files...");
            (HashMap::new(), Vec::new())
        }
    };

    // Build directory tree
    let (root_dir, total_files, reused_count) = build_directory(agent, &path, &existing_blob_map, String::new()).await?;
    let uploaded_count = total_files - reused_count;

    // Check if we need to split into subfs records
    const MAX_MANIFEST_SIZE: usize = 140 * 1024; // 140KB (PDS limit is 150KB)
    const FILE_COUNT_THRESHOLD: usize = 250; // Start splitting at this many files
    const TARGET_FILE_COUNT: usize = 200; // Keep main manifest under this

    let mut working_directory = root_dir;
    let mut current_file_count = total_files;
    let mut new_subfs_uris: Vec<(String, String)> = Vec::new();

    // Estimate initial manifest size
    let mut manifest_size = subfs_utils::estimate_directory_size(&working_directory);

    if total_files >= FILE_COUNT_THRESHOLD || manifest_size > MAX_MANIFEST_SIZE {
        println!("\n⚠️ Large site detected ({} files, {:.1}KB manifest), splitting into subfs records...",
            total_files, manifest_size as f64 / 1024.0);

        let mut attempts = 0;
        const MAX_SPLIT_ATTEMPTS: usize = 50;

        while (manifest_size > MAX_MANIFEST_SIZE || current_file_count > TARGET_FILE_COUNT) && attempts < MAX_SPLIT_ATTEMPTS {
            attempts += 1;

            // Find large directories to split
            let directories = subfs_utils::find_large_directories(&working_directory, String::new());

            if let Some(largest_dir) = directories.first() {
                println!(" Split #{}: {} ({} files, {:.1}KB)",
                    attempts, largest_dir.path, largest_dir.file_count, largest_dir.size as f64 / 1024.0);

                // Create a subfs record for this directory
                use jacquard_common::types::string::Tid;
                let subfs_tid = Tid::now_0();
                let subfs_rkey = subfs_tid.to_string();

                let subfs_manifest = crate::place_wisp::subfs::SubfsRecord::new()
                    .root(convert_fs_dir_to_subfs_dir(largest_dir.directory.clone()))
                    .file_count(Some(largest_dir.file_count as i64))
                    .created_at(Datetime::now())
                    .build();

                // Upload subfs record
                let subfs_output = agent.put_record(
                    RecordKey::from(Rkey::new(&subfs_rkey).into_diagnostic()?),
                    subfs_manifest
                ).await.into_diagnostic()?;

                let subfs_uri = subfs_output.uri.to_string();
                println!(" ✅ Created subfs: {}", subfs_uri);

                // Replace directory with subfs node (flat: false to preserve structure)
                working_directory = subfs_utils::replace_directory_with_subfs(
                    working_directory,
                    &largest_dir.path,
                    &subfs_uri,
                    false // Preserve directory structure
                )?;

                new_subfs_uris.push((subfs_uri, largest_dir.path.clone()));
                current_file_count -= largest_dir.file_count;

                // Recalculate manifest size
                manifest_size = subfs_utils::estimate_directory_size(&working_directory);
                println!(" → Manifest now {:.1}KB with {} files ({} subfs total)",
                    manifest_size as f64 / 1024.0, current_file_count, new_subfs_uris.len());

                if manifest_size <= MAX_MANIFEST_SIZE && current_file_count <= TARGET_FILE_COUNT {
                    println!("✅ Manifest now fits within limits");
                    break;
                }
            } else {
                println!(" No more subdirectories to split - stopping");
                break;
            }
        }

        if attempts >= MAX_SPLIT_ATTEMPTS {
            return Err(miette::miette!(
                "Exceeded maximum split attempts ({}). Manifest still too large: {:.1}KB with {} files",
                MAX_SPLIT_ATTEMPTS,
                manifest_size as f64 / 1024.0,
                current_file_count
            ));
        }

        println!("✅ Split complete: {} subfs records, {} files in main manifest, {:.1}KB",
            new_subfs_uris.len(), current_file_count, manifest_size as f64 / 1024.0);
    } else {
        println!("Manifest created ({} files, {:.1}KB) - no splitting needed",
            total_files, manifest_size as f64 / 1024.0);
    }

    // Create the final Fs record
    let fs_record = Fs::new()
        .site(CowStr::from(site_name.clone()))
        .root(working_directory)
        .file_count(current_file_count as i64)
        .created_at(Datetime::now())
        .build();

    // Use site name as the record key
    let rkey = Rkey::new(&site_name).map_err(|e| miette::miette!("Invalid rkey: {}", e))?;
    let output = agent.put_record(RecordKey::from(rkey), fs_record).await?;

    // Extract DID from the AT URI (format: at://did:plc:xxx/collection/rkey)
    let uri_str = output.uri.to_string();
    let did = uri_str
        .strip_prefix("at://")
        .and_then(|s| s.split('/').next())
        .ok_or_else(|| miette::miette!("Failed to parse DID from URI"))?;

    println!("\n✓ Deployed site '{}': {}", site_name, output.uri);
    println!(" Total files: {} ({} reused, {} uploaded)", total_files, reused_count, uploaded_count);
    println!(" Available at: https://sites.wisp.place/{}/{}", did, site_name);

    // Clean up old subfs records
    if !old_subfs_uris.is_empty() {
        println!("\nCleaning up {} old subfs records...", old_subfs_uris.len());

        let mut deleted_count = 0;
        let mut failed_count = 0;

        for (uri, _path) in old_subfs_uris {
            match subfs_utils::delete_subfs_record(agent, &uri).await {
                Ok(_) => {
                    deleted_count += 1;
                    println!(" 🗑️ Deleted old subfs: {}", uri);
                }
                Err(e) => {
                    failed_count += 1;
                    eprintln!(" ⚠️ Failed to delete {}: {}", uri, e);
                }
            }
        }

        if failed_count > 0 {
            eprintln!("⚠️ Cleanup completed with {} deleted, {} failed", deleted_count, failed_count);
        } else {
            println!("✅ Cleanup complete: {} old subfs records deleted", deleted_count);
        }
    }

    Ok(())
}

/// Recursively build a Directory from a filesystem path
/// current_path is the path from the root of the site (e.g., "" for root, "config" for config dir)
fn build_directory<'a>(
    agent: &'a Agent<impl jacquard::client::AgentSession + IdentityResolver + 'a>,
    dir_path: &'a Path,
    existing_blobs: &'a HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
    current_path: String,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = miette::Result<(Directory<'static>, usize, usize)>> + 'a>>
{
    Box::pin(async move {
        // Collect all directory entries first
        let dir_entries: Vec<_> = std::fs::read_dir(dir_path)
            .into_diagnostic()?
            .collect::<Result<Vec<_>, _>>()
            .into_diagnostic()?;

        // Separate files and directories
        let mut file_tasks = Vec::new();
        let mut dir_tasks = Vec::new();

        for entry in dir_entries {
            let path = entry.path();
            let name = entry.file_name();
            let name_str = name.to_str()
                .ok_or_else(|| miette::miette!("Invalid filename: {:?}", name))?
                .to_string();

            // Skip unwanted files and directories

            // .git directory (version control - thousands of files)
            if name_str == ".git" {
                continue;
            }

            // .DS_Store (macOS metadata - can leak info)
            if name_str == ".DS_Store" {
                continue;
            }

            // .env files (environment variables with secrets)
            if name_str.starts_with(".env") {
                continue;
            }

            // node_modules (dependency folder - can be 100,000+ files)
            if name_str == "node_modules" {
                continue;
            }

            // OS metadata files
            if name_str == "Thumbs.db" || name_str == "desktop.ini" || name_str.starts_with("._") {
                continue;
            }

            // macOS system directories
            if name_str == ".Spotlight-V100" || name_str == ".Trashes" || name_str == ".fseventsd" {
                continue;
            }

            // Cache and temp directories
            if name_str == ".cache" || name_str == ".temp" || name_str == ".tmp" {
                continue;
            }

            // Python cache
            if name_str == "__pycache__" || name_str.ends_with(".pyc") {
                continue;
            }

            // Python virtual environments
            if name_str == ".venv" || name_str == "venv" || name_str == "env" {
                continue;
            }

            // Editor swap files
            if name_str.ends_with(".swp") || name_str.ends_with(".swo") || name_str.ends_with("~") {
                continue;
            }

            let metadata = entry.metadata().into_diagnostic()?;

            if metadata.is_file() {
                // Construct full path for this file (for blob map lookup)
                let full_path = if current_path.is_empty() {
                    name_str.clone()
                } else {
                    format!("{}/{}", current_path, name_str)
                };
                file_tasks.push((name_str, path, full_path));
            } else if metadata.is_dir() {
                dir_tasks.push((name_str, path));
            }
        }

        // Process files concurrently with a limit of 5
        let file_results: Vec<(Entry<'static>, bool)> = stream::iter(file_tasks)
            .map(|(name, path, full_path)| async move {
                let (file_node, reused) = process_file(agent, &path, &full_path, existing_blobs).await?;
                let entry = Entry::new()
                    .name(CowStr::from(name))
                    .node(EntryNode::File(Box::new(file_node)))
                    .build();
                Ok::<_, miette::Report>((entry, reused))
            })
            .buffer_unordered(5)
            .collect::<Vec<_>>()
            .await
            .into_iter()
            .collect::<miette::Result<Vec<_>>>()?;

        let mut file_entries = Vec::new();
        let mut reused_count = 0;
        let mut total_files = 0;

        for (entry, reused) in file_results {
            file_entries.push(entry);
            total_files += 1;
            if reused {
                reused_count += 1;
            }
        }

        // Process directories recursively (sequentially to avoid too much nesting)
        let mut dir_entries = Vec::new();
        for (name, path) in dir_tasks {
            // Construct full path for subdirectory
            let subdir_path = if current_path.is_empty() {
                name.clone()
            } else {
                format!("{}/{}", current_path, name)
            };
            let (subdir, sub_total, sub_reused) = build_directory(agent, &path, existing_blobs, subdir_path).await?;
            dir_entries.push(Entry::new()
                .name(CowStr::from(name))
                .node(EntryNode::Directory(Box::new(subdir)))
                .build());
            total_files += sub_total;
            reused_count += sub_reused;
        }

        // Combine file and directory entries
        let mut entries = file_entries;
        entries.extend(dir_entries);

        let directory = Directory::new()
            .r#type(CowStr::from("directory"))
            .entries(entries)
            .build();

        Ok((directory, total_files, reused_count))
    })
}

/// Process a single file: gzip -> base64 -> upload blob (or reuse existing)
/// Returns (File, reused: bool)
/// file_path_key is the full path from the site root (e.g., "config/file.json") for blob map lookup
///
/// Special handling: _redirects files are NOT compressed (uploaded as-is)
async fn process_file(
    agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
    file_path: &Path,
    file_path_key: &str,
    existing_blobs: &HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
) -> miette::Result<(File<'static>, bool)>
{
    // Read file
    let file_data = std::fs::read(file_path).into_diagnostic()?;

    // Detect original MIME type
    let original_mime = mime_guess::from_path(file_path)
        .first_or_octet_stream()
        .to_string();

    // Check if this is a _redirects file (don't compress it)
    let is_redirects_file = file_path.file_name()
        .and_then(|n| n.to_str())
        .map(|n| n == "_redirects")
        .unwrap_or(false);

    let (upload_bytes, encoding, is_base64) = if is_redirects_file {
        // Don't compress _redirects - upload as-is
        (file_data.clone(), None, false)
    } else {
        // Gzip compress
        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
        encoder.write_all(&file_data).into_diagnostic()?;
        let gzipped = encoder.finish().into_diagnostic()?;

        // Base64 encode the gzipped data
        let base64_bytes = base64::prelude::BASE64_STANDARD.encode(&gzipped).into_bytes();
        (base64_bytes, Some("gzip"), true)
    };

    // Compute CID for this file
    let file_cid = cid::compute_cid(&upload_bytes);

    // Check if we have an existing blob with the same CID
    let existing_blob = existing_blobs.get(file_path_key);

    if let Some((existing_blob_ref, existing_cid)) = existing_blob {
        if existing_cid == &file_cid {
            // CIDs match - reuse existing blob
            println!(" ✓ Reusing blob for {} (CID: {})", file_path_key, file_cid);
            let mut file_builder = File::new()
                .r#type(CowStr::from("file"))
                .blob(existing_blob_ref.clone())
                .mime_type(CowStr::from(original_mime));

            if let Some(enc) = encoding {
                file_builder = file_builder.encoding(CowStr::from(enc));
            }
            if is_base64 {
                file_builder = file_builder.base64(true);
            }

            return Ok((file_builder.build(), true));
        }
    }

    // File is new or changed - upload it
    let mime_type = if is_redirects_file {
        MimeType::new_static("text/plain")
    } else {
        MimeType::new_static("application/octet-stream")
    };

    println!(" ↑ Uploading {} ({} bytes, CID: {})", file_path_key, upload_bytes.len(), file_cid);
    let blob = agent.upload_blob(upload_bytes, mime_type).await?;

    let mut file_builder = File::new()
        .r#type(CowStr::from("file"))
        .blob(blob)
        .mime_type(CowStr::from(original_mime));

    if let Some(enc) = encoding {
        file_builder = file_builder.encoding(CowStr::from(enc));
    }
    if is_base64 {
        file_builder = file_builder.base64(true);
    }

    Ok((file_builder.build(), false))
}
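
// For reference: a consumer of these blobs has to reverse the transform applied
// above (base64-decode, then gzip-decompress). The helper below is an
// illustrative sketch only, with a made-up name; it is not called anywhere in
// this file and assumes the `base64` and `flate2` crates already imported at
// the top of the file.
#[allow(dead_code)]
fn decode_uploaded_bytes(stored: &[u8], is_base64: bool, gzipped: bool) -> miette::Result<Vec<u8>> {
    use flate2::read::GzDecoder;
    use std::io::Read;

    // Undo the base64 layer first (it was applied last during upload)
    let bytes = if is_base64 {
        base64::prelude::BASE64_STANDARD.decode(stored).into_diagnostic()?
    } else {
        stored.to_vec()
    };

    // Then undo the gzip layer, if any
    if gzipped {
        let mut out = Vec::new();
        GzDecoder::new(bytes.as_slice()).read_to_end(&mut out).into_diagnostic()?;
        Ok(out)
    } else {
        Ok(bytes)
    }
}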

/// Convert fs::Directory to subfs::Directory
/// They have the same structure, but different types
fn convert_fs_dir_to_subfs_dir(fs_dir: place_wisp::fs::Directory<'static>) -> place_wisp::subfs::Directory<'static> {
    use place_wisp::subfs::{Directory as SubfsDirectory, Entry as SubfsEntry, EntryNode as SubfsEntryNode, File as SubfsFile};

    let subfs_entries: Vec<SubfsEntry> = fs_dir.entries.into_iter().map(|entry| {
        let node = match entry.node {
            place_wisp::fs::EntryNode::File(file) => {
                SubfsEntryNode::File(Box::new(SubfsFile::new()
                    .r#type(file.r#type)
                    .blob(file.blob)
                    .encoding(file.encoding)
                    .mime_type(file.mime_type)
                    .base64(file.base64)
                    .build()))
            }
            place_wisp::fs::EntryNode::Directory(dir) => {
                SubfsEntryNode::Directory(Box::new(convert_fs_dir_to_subfs_dir(*dir)))
            }
            place_wisp::fs::EntryNode::Subfs(subfs) => {
                // Nested subfs in the directory we're converting
                // Note: subfs::Subfs doesn't have the 'flat' field - that's only in fs::Subfs
                SubfsEntryNode::Subfs(Box::new(place_wisp::subfs::Subfs::new()
                    .r#type(subfs.r#type)
                    .subject(subfs.subject)
                    .build()))
            }
            place_wisp::fs::EntryNode::Unknown(unknown) => {
                SubfsEntryNode::Unknown(unknown)
            }
        };

        SubfsEntry::new()
            .name(entry.name)
            .node(node)
            .build()
    }).collect();

    SubfsDirectory::new()
        .r#type(fs_dir.r#type)
        .entries(subfs_entries)
        .build()
}