Monorepo for wisp.place — a static-site hosting service built on top of the AT Protocol.
wisp.place
1mod builder_types;
2mod place_wisp;
3mod cid;
4mod blob_map;
5mod metadata;
6mod download;
7mod pull;
8mod serve;
9mod subfs_utils;
10mod redirects;
11mod ignore_patterns;
12
13use clap::{Parser, Subcommand};
14use jacquard::CowStr;
15use jacquard::client::{Agent, FileAuthStore, AgentSessionExt, MemoryCredentialSession, AgentSession};
16use jacquard::oauth::client::OAuthClient;
17use jacquard::oauth::loopback::LoopbackConfig;
18use jacquard::prelude::IdentityResolver;
19use jacquard_common::types::string::{Datetime, Rkey, RecordKey, AtUri};
20use jacquard_common::types::blob::MimeType;
21use miette::IntoDiagnostic;
22use std::path::{Path, PathBuf};
23use std::collections::HashMap;
24use flate2::Compression;
25use flate2::write::GzEncoder;
26use std::io::Write;
27use base64::Engine;
28use futures::stream::{self, StreamExt};
29use indicatif::{ProgressBar, ProgressStyle, MultiProgress};
30
31use place_wisp::fs::*;
32use place_wisp::settings::*;
33
/// Maximum number of concurrent file uploads to the PDS
/// (bounds `buffer_unordered` fan-out in `build_directory`)
const MAX_CONCURRENT_UPLOADS: usize = 2;

/// Limits for caching on wisp.place (from @wisp/constants).
/// A site exceeding either limit is still uploaded to the PDS, but
/// wisp.place itself will not serve it (see the warning in `deploy_site`).
const MAX_FILE_COUNT: usize = 1000;
const MAX_SITE_SIZE: usize = 300 * 1024 * 1024; // 300MB
40
/// Command-line arguments for the wisp.place CLI.
///
/// The deploy-related fields below are declared `global` with
/// `conflicts_with = "command"`: they support the legacy invocation
/// `wisp <input> [flags]` (no subcommand, handled in `main`) and are
/// rejected when an explicit subcommand is given.
#[derive(Parser, Debug)]
#[command(author, version, about = "wisp.place CLI tool")]
struct Args {
    #[command(subcommand)]
    command: Option<Commands>,

    // Deploy arguments (when no subcommand is specified)
    /// Handle (e.g., alice.bsky.social), DID, or PDS URL
    #[arg(global = true, conflicts_with = "command")]
    input: Option<CowStr<'static>>,

    /// Path to the directory containing your static site
    #[arg(short, long, global = true, conflicts_with = "command")]
    path: Option<PathBuf>,

    /// Site name (defaults to directory name)
    #[arg(short, long, global = true, conflicts_with = "command")]
    site: Option<String>,

    /// Path to auth store file
    #[arg(long, global = true, conflicts_with = "command")]
    store: Option<String>,

    /// App Password for authentication
    #[arg(long, global = true, conflicts_with = "command")]
    password: Option<CowStr<'static>>,

    /// Enable directory listing mode for paths without index files
    #[arg(long, global = true, conflicts_with = "command")]
    directory: bool,

    /// Enable SPA mode (serve index.html for all routes)
    #[arg(long, global = true, conflicts_with = "command")]
    spa: bool,

    /// Skip confirmation prompts (automatically accept warnings)
    #[arg(short = 'y', long, global = true, conflicts_with = "command")]
    yes: bool,
}
80
/// Subcommands for the wisp.place CLI.
///
/// `Deploy` mirrors the global deploy flags on `Args` so that both the
/// explicit `deploy` form and the legacy bare-argument form accept the
/// same options.
#[derive(Subcommand, Debug)]
enum Commands {
    /// Deploy a static site to wisp.place (default command)
    Deploy {
        /// Handle (e.g., alice.bsky.social), DID, or PDS URL
        input: CowStr<'static>,

        /// Path to the directory containing your static site
        #[arg(short, long, default_value = ".")]
        path: PathBuf,

        /// Site name (defaults to directory name)
        #[arg(short, long)]
        site: Option<String>,

        /// Path to auth store file (will be created if missing, only used with OAuth)
        #[arg(long, default_value = "/tmp/wisp-oauth-session.json")]
        store: String,

        /// App Password for authentication (alternative to OAuth)
        #[arg(long)]
        password: Option<CowStr<'static>>,

        /// Enable directory listing mode for paths without index files
        #[arg(long)]
        directory: bool,

        /// Enable SPA mode (serve index.html for all routes)
        #[arg(long)]
        spa: bool,

        /// Skip confirmation prompts (automatically accept warnings)
        #[arg(short = 'y', long)]
        yes: bool,
    },
    /// Pull a site from the PDS to a local directory
    Pull {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the downloaded site
        #[arg(short, long, default_value = ".")]
        output: PathBuf,
    },
    /// Serve a site locally with real-time firehose updates
    Serve {
        /// Handle (e.g., alice.bsky.social) or DID
        input: CowStr<'static>,

        /// Site name (record key)
        #[arg(short, long)]
        site: String,

        /// Output directory for the site files
        #[arg(short, long, default_value = ".")]
        output: PathBuf,

        /// Port to serve on
        #[arg(short, long, default_value = "8080")]
        port: u16,
    },
}
147
148#[tokio::main]
149async fn main() -> miette::Result<()> {
150 let args = Args::parse();
151
152 let result = match args.command {
153 Some(Commands::Deploy { input, path, site, store, password, directory, spa, yes }) => {
154 // Dispatch to appropriate authentication method
155 if let Some(password) = password {
156 run_with_app_password(input, password, path, site, directory, spa, yes).await
157 } else {
158 run_with_oauth(input, store, path, site, directory, spa, yes).await
159 }
160 }
161 Some(Commands::Pull { input, site, output }) => {
162 pull::pull_site(input, CowStr::from(site), output).await
163 }
164 Some(Commands::Serve { input, site, output, port }) => {
165 serve::serve_site(input, CowStr::from(site), output, port).await
166 }
167 None => {
168 // Legacy mode: if input is provided, assume deploy command
169 if let Some(input) = args.input {
170 let path = args.path.unwrap_or_else(|| PathBuf::from("."));
171 let store = args.store.unwrap_or_else(|| "/tmp/wisp-oauth-session.json".to_string());
172
173 // Dispatch to appropriate authentication method
174 if let Some(password) = args.password {
175 run_with_app_password(input, password, path, args.site, args.directory, args.spa, args.yes).await
176 } else {
177 run_with_oauth(input, store, path, args.site, args.directory, args.spa, args.yes).await
178 }
179 } else {
180 // No command and no input, show help
181 use clap::CommandFactory;
182 Args::command().print_help().into_diagnostic()?;
183 Ok(())
184 }
185 }
186 };
187
188 // Force exit to avoid hanging on background tasks/connections
189 match result {
190 Ok(_) => std::process::exit(0),
191 Err(e) => {
192 eprintln!("{:?}", e);
193 std::process::exit(1)
194 }
195 }
196}
197
198/// Run deployment with app password authentication
199async fn run_with_app_password(
200 input: CowStr<'static>,
201 password: CowStr<'static>,
202 path: PathBuf,
203 site: Option<String>,
204 directory: bool,
205 spa: bool,
206 yes: bool,
207) -> miette::Result<()> {
208 let (session, auth) =
209 MemoryCredentialSession::authenticated(input, password, None, None).await?;
210 println!("Signed in as {}", auth.handle);
211
212 let agent: Agent<_> = Agent::from(session);
213 deploy_site(&agent, path, site, directory, spa, yes).await
214}
215
216/// Run deployment with OAuth authentication
217async fn run_with_oauth(
218 input: CowStr<'static>,
219 store: String,
220 path: PathBuf,
221 site: Option<String>,
222 directory: bool,
223 spa: bool,
224 yes: bool,
225) -> miette::Result<()> {
226 use jacquard::oauth::scopes::Scope;
227 use jacquard::oauth::atproto::AtprotoClientMetadata;
228 use jacquard::oauth::session::ClientData;
229 use url::Url;
230
231 // Request the necessary scopes for wisp.place (including settings)
232 let scopes = Scope::parse_multiple("atproto repo:place.wisp.fs repo:place.wisp.subfs repo:place.wisp.settings blob:*/*")
233 .map_err(|e| miette::miette!("Failed to parse scopes: {:?}", e))?;
234
235 // Create redirect URIs that match the loopback server (port 4000, path /oauth/callback)
236 let redirect_uris = vec![
237 Url::parse("http://127.0.0.1:4000/oauth/callback").into_diagnostic()?,
238 Url::parse("http://[::1]:4000/oauth/callback").into_diagnostic()?,
239 ];
240
241 // Create client metadata with matching redirect URIs and scopes
242 let client_data = ClientData {
243 keyset: None,
244 config: AtprotoClientMetadata::new_localhost(
245 Some(redirect_uris),
246 Some(scopes),
247 ),
248 };
249
250 let oauth = OAuthClient::new(FileAuthStore::new(&store), client_data);
251
252 let session = oauth
253 .login_with_local_server(input, Default::default(), LoopbackConfig::default())
254 .await?;
255
256 let agent: Agent<_> = Agent::from(session);
257 deploy_site(&agent, path, site, directory, spa, yes).await
258}
259
260/// Scan directory to count files and calculate total size
261/// Returns (file_count, total_size_bytes)
262fn scan_directory_stats(
263 dir_path: &Path,
264 ignore_matcher: &ignore_patterns::IgnoreMatcher,
265 current_path: String,
266) -> miette::Result<(usize, u64)> {
267 let mut file_count = 0;
268 let mut total_size = 0u64;
269
270 let dir_entries: Vec<_> = std::fs::read_dir(dir_path)
271 .into_diagnostic()?
272 .collect::<Result<Vec<_>, _>>()
273 .into_diagnostic()?;
274
275 for entry in dir_entries {
276 let path = entry.path();
277 let name = entry.file_name();
278 let name_str = name.to_str()
279 .ok_or_else(|| miette::miette!("Invalid filename: {:?}", name))?
280 .to_string();
281
282 let full_path = if current_path.is_empty() {
283 name_str.clone()
284 } else {
285 format!("{}/{}", current_path, name_str)
286 };
287
288 // Skip files/directories that match ignore patterns
289 if ignore_matcher.is_ignored(&full_path) || ignore_matcher.is_filename_ignored(&name_str) {
290 continue;
291 }
292
293 let metadata = entry.metadata().into_diagnostic()?;
294
295 if metadata.is_file() {
296 file_count += 1;
297 total_size += metadata.len();
298 } else if metadata.is_dir() {
299 let subdir_path = if current_path.is_empty() {
300 name_str
301 } else {
302 format!("{}/{}", current_path, name_str)
303 };
304 let (sub_count, sub_size) = scan_directory_stats(&path, ignore_matcher, subdir_path)?;
305 file_count += sub_count;
306 total_size += sub_size;
307 }
308 }
309
310 Ok((file_count, total_size))
311}
312
313/// Deploy the site using the provided agent
314async fn deploy_site(
315 agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
316 path: PathBuf,
317 site: Option<String>,
318 directory_listing: bool,
319 spa_mode: bool,
320 skip_prompts: bool,
321) -> miette::Result<()> {
322 // Verify the path exists
323 if !path.exists() {
324 return Err(miette::miette!("Path does not exist: {}", path.display()));
325 }
326
327 // Get site name
328 let site_name = site.unwrap_or_else(|| {
329 path
330 .file_name()
331 .and_then(|n| n.to_str())
332 .unwrap_or("site")
333 .to_string()
334 });
335
336 println!("Deploying site '{}'...", site_name);
337
338 // Scan directory to check file count and size
339 let ignore_matcher = ignore_patterns::IgnoreMatcher::new(&path)?;
340 let (file_count, total_size) = scan_directory_stats(&path, &ignore_matcher, String::new())?;
341
342 let size_mb = total_size as f64 / (1024.0 * 1024.0);
343 println!("Scanned: {} files, {:.1} MB total", file_count, size_mb);
344
345 // Check if limits are exceeded
346 let exceeds_file_count = file_count > MAX_FILE_COUNT;
347 let exceeds_size = total_size > MAX_SITE_SIZE as u64;
348
349 if exceeds_file_count || exceeds_size {
350 println!("\n⚠️ Warning: Your site exceeds wisp.place caching limits:");
351
352 if exceeds_file_count {
353 println!(" • File count: {} (limit: {})", file_count, MAX_FILE_COUNT);
354 }
355
356 if exceeds_size {
357 let size_mb = total_size as f64 / (1024.0 * 1024.0);
358 let limit_mb = MAX_SITE_SIZE as f64 / (1024.0 * 1024.0);
359 println!(" • Total size: {:.1} MB (limit: {:.0} MB)", size_mb, limit_mb);
360 }
361
362 println!("\nwisp.place will NOT serve your site if you proceed.");
363 println!("Your site will be uploaded to your PDS, but will only be accessible via:");
364 println!(" • wisp-cli serve (local hosting)");
365 println!(" • Other hosting services with more generous limits");
366
367 if !skip_prompts {
368 // Prompt for confirmation
369 use std::io::{self, Write};
370 print!("\nDo you want to upload anyway? (y/N): ");
371 io::stdout().flush().into_diagnostic()?;
372
373 let mut input = String::new();
374 io::stdin().read_line(&mut input).into_diagnostic()?;
375 let input = input.trim().to_lowercase();
376
377 if input != "y" && input != "yes" {
378 println!("Upload cancelled.");
379 return Ok(());
380 }
381 } else {
382 println!("\nSkipping confirmation (--yes flag set).");
383 }
384
385 println!("\nProceeding with upload...\n");
386 }
387
388 // Try to fetch existing manifest for incremental updates
389 let (existing_blob_map, old_subfs_uris): (HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>, Vec<(String, String)>) = {
390 use jacquard_common::types::string::AtUri;
391
392 // Get the DID for this session
393 let session_info = agent.session_info().await;
394 if let Some((did, _)) = session_info {
395 // Construct the AT URI for the record
396 let uri_string = format!("at://{}/place.wisp.fs/{}", did, site_name);
397 if let Ok(uri) = AtUri::new(&uri_string) {
398 match agent.get_record::<Fs>(&uri).await {
399 Ok(response) => {
400 match response.into_output() {
401 Ok(record_output) => {
402 let existing_manifest = record_output.value;
403 let mut blob_map = blob_map::extract_blob_map(&existing_manifest.root);
404 println!("Found existing manifest with {} files in main record", blob_map.len());
405
406 // Extract subfs URIs from main record
407 let subfs_uris = subfs_utils::extract_subfs_uris(&existing_manifest.root, String::new());
408
409 if !subfs_uris.is_empty() {
410 println!("Found {} subfs records, fetching for blob reuse...", subfs_uris.len());
411
412 // Merge blob maps from all subfs records
413 match subfs_utils::merge_subfs_blob_maps(agent, subfs_uris.clone(), &mut blob_map).await {
414 Ok(merged_count) => {
415 println!("Total blob map: {} files (main + {} from subfs)", blob_map.len(), merged_count);
416 }
417 Err(e) => {
418 eprintln!("⚠️ Failed to merge some subfs blob maps: {}", e);
419 }
420 }
421
422 (blob_map, subfs_uris)
423 } else {
424 (blob_map, Vec::new())
425 }
426 }
427 Err(_) => {
428 println!("No existing manifest found, uploading all files...");
429 (HashMap::new(), Vec::new())
430 }
431 }
432 }
433 Err(_) => {
434 // Record doesn't exist yet - this is a new site
435 println!("No existing manifest found, uploading all files...");
436 (HashMap::new(), Vec::new())
437 }
438 }
439 } else {
440 println!("No existing manifest found (invalid URI), uploading all files...");
441 (HashMap::new(), Vec::new())
442 }
443 } else {
444 println!("No existing manifest found (could not get DID), uploading all files...");
445 (HashMap::new(), Vec::new())
446 }
447 };
448
449 // Create progress tracking (spinner style since we don't know total count upfront)
450 let multi_progress = MultiProgress::new();
451 let progress = multi_progress.add(ProgressBar::new_spinner());
452 progress.set_style(
453 ProgressStyle::default_spinner()
454 .template("[{elapsed_precise}] {spinner:.cyan} {pos} files {msg}")
455 .into_diagnostic()?
456 .tick_chars("⠁⠂⠄⡀⢀⠠⠐⠈ ")
457 );
458 progress.set_message("Scanning files...");
459 progress.enable_steady_tick(std::time::Duration::from_millis(100));
460
461 let (root_dir, total_files, reused_count) = build_directory(agent, &path, &existing_blob_map, String::new(), &ignore_matcher, &progress).await?;
462 let uploaded_count = total_files - reused_count;
463
464 progress.finish_with_message(format!("✓ {} files ({} uploaded, {} reused)", total_files, uploaded_count, reused_count));
465
466 // Check if we need to split into subfs records
467 const MAX_MANIFEST_SIZE: usize = 140 * 1024; // 140KB (PDS limit is 150KB)
468 const FILE_COUNT_THRESHOLD: usize = 250; // Start splitting at this many files
469 const TARGET_FILE_COUNT: usize = 200; // Keep main manifest under this
470
471 let mut working_directory = root_dir;
472 let mut current_file_count = total_files;
473 let mut new_subfs_uris: Vec<(String, String)> = Vec::new();
474
475 // Estimate initial manifest size
476 let mut manifest_size = subfs_utils::estimate_directory_size(&working_directory);
477
478 if total_files >= FILE_COUNT_THRESHOLD || manifest_size > MAX_MANIFEST_SIZE {
479 println!("\n⚠️ Large site detected ({} files, {:.1}KB manifest), splitting into subfs records...",
480 total_files, manifest_size as f64 / 1024.0);
481
482 let mut attempts = 0;
483 const MAX_SPLIT_ATTEMPTS: usize = 50;
484
485 while (manifest_size > MAX_MANIFEST_SIZE || current_file_count > TARGET_FILE_COUNT) && attempts < MAX_SPLIT_ATTEMPTS {
486 attempts += 1;
487
488 // Find large directories to split
489 let directories = subfs_utils::find_large_directories(&working_directory, String::new());
490
491 if let Some(largest_dir) = directories.first() {
492 println!(" Split #{}: {} ({} files, {:.1}KB)",
493 attempts, largest_dir.path, largest_dir.file_count, largest_dir.size as f64 / 1024.0);
494
495 // Check if this directory is itself too large for a single subfs record
496 const MAX_SUBFS_SIZE: usize = 75 * 1024; // 75KB soft limit for safety
497 let mut subfs_uri = String::new();
498
499 if largest_dir.size > MAX_SUBFS_SIZE {
500 // Need to split this directory into multiple chunks
501 println!(" → Directory too large, splitting into chunks...");
502 let chunks = subfs_utils::split_directory_into_chunks(&largest_dir.directory, MAX_SUBFS_SIZE);
503 println!(" → Created {} chunks", chunks.len());
504
505 // Upload each chunk as a subfs record
506 let mut chunk_uris = Vec::new();
507 for (i, chunk) in chunks.iter().enumerate() {
508 use jacquard_common::types::string::Tid;
509 let chunk_tid = Tid::now_0();
510 let chunk_rkey = chunk_tid.to_string();
511
512 let chunk_file_count = subfs_utils::count_files_in_directory(chunk);
513 let chunk_size = subfs_utils::estimate_directory_size(chunk);
514
515 let chunk_manifest = crate::place_wisp::subfs::SubfsRecord::new()
516 .root(convert_fs_dir_to_subfs_dir(chunk.clone()))
517 .file_count(Some(chunk_file_count as i64))
518 .created_at(Datetime::now())
519 .build();
520
521 println!(" → Uploading chunk {}/{} ({} files, {:.1}KB)...",
522 i + 1, chunks.len(), chunk_file_count, chunk_size as f64 / 1024.0);
523
524 let chunk_output = agent.put_record(
525 RecordKey::from(Rkey::new(&chunk_rkey).into_diagnostic()?),
526 chunk_manifest
527 ).await.into_diagnostic()?;
528
529 let chunk_uri = chunk_output.uri.to_string();
530 chunk_uris.push((chunk_uri.clone(), format!("{}#{}", largest_dir.path, i)));
531 new_subfs_uris.push((chunk_uri.clone(), format!("{}#{}", largest_dir.path, i)));
532 }
533
534 // Create a parent subfs record that references all chunks
535 // Each chunk reference MUST have flat: true to merge chunk contents
536 println!(" → Creating parent subfs with {} chunk references...", chunk_uris.len());
537 use jacquard_common::CowStr;
538 use crate::place_wisp::fs::{Subfs};
539
540 // Convert to fs::Subfs (which has the 'flat' field) instead of subfs::Subfs
541 let parent_entries_fs: Vec<Entry> = chunk_uris.iter().enumerate().map(|(i, (uri, _))| {
542 let uri_string = uri.clone();
543 let at_uri = AtUri::new_cow(CowStr::from(uri_string)).expect("valid URI");
544 Entry::new()
545 .name(CowStr::from(format!("chunk{}", i)))
546 .node(EntryNode::Subfs(Box::new(
547 Subfs::new()
548 .r#type(CowStr::from("subfs"))
549 .subject(at_uri)
550 .flat(Some(true)) // EXPLICITLY TRUE - merge chunk contents
551 .build()
552 )))
553 .build()
554 }).collect();
555
556 let parent_root_fs = Directory::new()
557 .r#type(CowStr::from("directory"))
558 .entries(parent_entries_fs)
559 .build();
560
561 // Convert to subfs::Directory for the parent subfs record
562 let parent_root_subfs = convert_fs_dir_to_subfs_dir(parent_root_fs);
563
564 use jacquard_common::types::string::Tid;
565 let parent_tid = Tid::now_0();
566 let parent_rkey = parent_tid.to_string();
567
568 let parent_manifest = crate::place_wisp::subfs::SubfsRecord::new()
569 .root(parent_root_subfs)
570 .file_count(Some(largest_dir.file_count as i64))
571 .created_at(Datetime::now())
572 .build();
573
574 let parent_output = agent.put_record(
575 RecordKey::from(Rkey::new(&parent_rkey).into_diagnostic()?),
576 parent_manifest
577 ).await.into_diagnostic()?;
578
579 subfs_uri = parent_output.uri.to_string();
580 println!(" ✅ Created parent subfs with chunks (flat=true on each chunk): {}", subfs_uri);
581 } else {
582 // Directory fits in a single subfs record
583 use jacquard_common::types::string::Tid;
584 let subfs_tid = Tid::now_0();
585 let subfs_rkey = subfs_tid.to_string();
586
587 let subfs_manifest = crate::place_wisp::subfs::SubfsRecord::new()
588 .root(convert_fs_dir_to_subfs_dir(largest_dir.directory.clone()))
589 .file_count(Some(largest_dir.file_count as i64))
590 .created_at(Datetime::now())
591 .build();
592
593 // Upload subfs record
594 let subfs_output = agent.put_record(
595 RecordKey::from(Rkey::new(&subfs_rkey).into_diagnostic()?),
596 subfs_manifest
597 ).await.into_diagnostic()?;
598
599 subfs_uri = subfs_output.uri.to_string();
600 println!(" ✅ Created subfs: {}", subfs_uri);
601 }
602
603 // Replace directory with subfs node (flat: false to preserve directory structure)
604 working_directory = subfs_utils::replace_directory_with_subfs(
605 working_directory,
606 &largest_dir.path,
607 &subfs_uri,
608 false // Preserve directory - the chunks inside have flat=true
609 )?;
610
611 new_subfs_uris.push((subfs_uri, largest_dir.path.clone()));
612 current_file_count -= largest_dir.file_count;
613
614 // Recalculate manifest size
615 manifest_size = subfs_utils::estimate_directory_size(&working_directory);
616 println!(" → Manifest now {:.1}KB with {} files ({} subfs total)",
617 manifest_size as f64 / 1024.0, current_file_count, new_subfs_uris.len());
618
619 if manifest_size <= MAX_MANIFEST_SIZE && current_file_count <= TARGET_FILE_COUNT {
620 println!("✅ Manifest now fits within limits");
621 break;
622 }
623 } else {
624 println!(" No more subdirectories to split - stopping");
625 break;
626 }
627 }
628
629 if attempts >= MAX_SPLIT_ATTEMPTS {
630 return Err(miette::miette!(
631 "Exceeded maximum split attempts ({}). Manifest still too large: {:.1}KB with {} files",
632 MAX_SPLIT_ATTEMPTS,
633 manifest_size as f64 / 1024.0,
634 current_file_count
635 ));
636 }
637
638 println!("✅ Split complete: {} subfs records, {} files in main manifest, {:.1}KB",
639 new_subfs_uris.len(), current_file_count, manifest_size as f64 / 1024.0);
640 } else {
641 println!("Manifest created ({} files, {:.1}KB) - no splitting needed",
642 total_files, manifest_size as f64 / 1024.0);
643 }
644
645 // Create the final Fs record
646 let fs_record = Fs::new()
647 .site(CowStr::from(site_name.clone()))
648 .root(working_directory)
649 .file_count(current_file_count as i64)
650 .created_at(Datetime::now())
651 .build();
652
653 // Use site name as the record key
654 let rkey = Rkey::new(&site_name).map_err(|e| miette::miette!("Invalid rkey: {}", e))?;
655 let output = agent.put_record(RecordKey::from(rkey), fs_record).await?;
656
657 // Extract DID from the AT URI (format: at://did:plc:xxx/collection/rkey)
658 let uri_str = output.uri.to_string();
659 let did = uri_str
660 .strip_prefix("at://")
661 .and_then(|s| s.split('/').next())
662 .ok_or_else(|| miette::miette!("Failed to parse DID from URI"))?;
663
664 println!("\n✓ Deployed site '{}': {}", site_name, output.uri);
665 println!(" Total files: {} ({} reused, {} uploaded)", total_files, reused_count, uploaded_count);
666 println!(" Available at: https://sites.wisp.place/{}/{}", did, site_name);
667
668 // Clean up old subfs records
669 if !old_subfs_uris.is_empty() {
670 println!("\nCleaning up {} old subfs records...", old_subfs_uris.len());
671
672 let mut deleted_count = 0;
673 let mut failed_count = 0;
674
675 for (uri, _path) in old_subfs_uris {
676 match subfs_utils::delete_subfs_record(agent, &uri).await {
677 Ok(_) => {
678 deleted_count += 1;
679 println!(" 🗑️ Deleted old subfs: {}", uri);
680 }
681 Err(e) => {
682 failed_count += 1;
683 eprintln!(" ⚠️ Failed to delete {}: {}", uri, e);
684 }
685 }
686 }
687
688 if failed_count > 0 {
689 eprintln!("⚠️ Cleanup completed with {} deleted, {} failed", deleted_count, failed_count);
690 } else {
691 println!("✅ Cleanup complete: {} old subfs records deleted", deleted_count);
692 }
693 }
694
695 // Upload settings if either flag is set
696 if directory_listing || spa_mode {
697 // Validate mutual exclusivity
698 if directory_listing && spa_mode {
699 return Err(miette::miette!("Cannot enable both --directory and --SPA modes"));
700 }
701
702 println!("\n⚙️ Uploading site settings...");
703
704 // Build settings record
705 let mut settings_builder = Settings::new();
706
707 if directory_listing {
708 settings_builder = settings_builder.directory_listing(Some(true));
709 println!(" • Directory listing: enabled");
710 }
711
712 if spa_mode {
713 settings_builder = settings_builder.spa_mode(Some(CowStr::from("index.html")));
714 println!(" • SPA mode: enabled (serving index.html for all routes)");
715 }
716
717 let settings_record = settings_builder.build();
718
719 // Upload settings record with same rkey as site
720 let rkey = Rkey::new(&site_name).map_err(|e| miette::miette!("Invalid rkey: {}", e))?;
721 match agent.put_record(RecordKey::from(rkey), settings_record).await {
722 Ok(settings_output) => {
723 println!("✅ Settings uploaded: {}", settings_output.uri);
724 }
725 Err(e) => {
726 eprintln!("⚠️ Failed to upload settings: {}", e);
727 eprintln!(" Site was deployed successfully, but settings may need to be configured manually.");
728 }
729 }
730 }
731
732 Ok(())
733}
734
735/// Recursively build a Directory from a filesystem path
736/// current_path is the path from the root of the site (e.g., "" for root, "config" for config dir)
737fn build_directory<'a>(
738 agent: &'a Agent<impl jacquard::client::AgentSession + IdentityResolver + 'a>,
739 dir_path: &'a Path,
740 existing_blobs: &'a HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
741 current_path: String,
742 ignore_matcher: &'a ignore_patterns::IgnoreMatcher,
743 progress: &'a ProgressBar,
744) -> std::pin::Pin<Box<dyn std::future::Future<Output = miette::Result<(Directory<'static>, usize, usize)>> + 'a>>
745{
746 Box::pin(async move {
747 // Collect all directory entries first
748 let dir_entries: Vec<_> = std::fs::read_dir(dir_path)
749 .into_diagnostic()?
750 .collect::<Result<Vec<_>, _>>()
751 .into_diagnostic()?;
752
753 // Separate files and directories
754 let mut file_tasks = Vec::new();
755 let mut dir_tasks = Vec::new();
756
757 for entry in dir_entries {
758 let path = entry.path();
759 let name = entry.file_name();
760 let name_str = name.to_str()
761 .ok_or_else(|| miette::miette!("Invalid filename: {:?}", name))?
762 .to_string();
763
764 // Construct full path for ignore checking
765 let full_path = if current_path.is_empty() {
766 name_str.clone()
767 } else {
768 format!("{}/{}", current_path, name_str)
769 };
770
771 // Skip files/directories that match ignore patterns
772 if ignore_matcher.is_ignored(&full_path) || ignore_matcher.is_filename_ignored(&name_str) {
773 continue;
774 }
775
776 let metadata = entry.metadata().into_diagnostic()?;
777
778 if metadata.is_file() {
779 // Construct full path for this file (for blob map lookup)
780 let full_path = if current_path.is_empty() {
781 name_str.clone()
782 } else {
783 format!("{}/{}", current_path, name_str)
784 };
785 file_tasks.push((name_str, path, full_path));
786 } else if metadata.is_dir() {
787 dir_tasks.push((name_str, path));
788 }
789 }
790
791 // Process files concurrently with a limit of 2
792 let file_results: Vec<(Entry<'static>, bool)> = stream::iter(file_tasks)
793 .map(|(name, path, full_path)| async move {
794 let (file_node, reused) = process_file(agent, &path, &full_path, existing_blobs, progress).await?;
795 progress.inc(1);
796 let entry = Entry::new()
797 .name(CowStr::from(name))
798 .node(EntryNode::File(Box::new(file_node)))
799 .build();
800 Ok::<_, miette::Report>((entry, reused))
801 })
802 .buffer_unordered(MAX_CONCURRENT_UPLOADS)
803 .collect::<Vec<_>>()
804 .await
805 .into_iter()
806 .collect::<miette::Result<Vec<_>>>()?;
807
808 let mut file_entries = Vec::new();
809 let mut reused_count = 0;
810 let mut total_files = 0;
811
812 for (entry, reused) in file_results {
813 file_entries.push(entry);
814 total_files += 1;
815 if reused {
816 reused_count += 1;
817 }
818 }
819
820 // Process directories recursively (sequentially to avoid too much nesting)
821 let mut dir_entries = Vec::new();
822 for (name, path) in dir_tasks {
823 // Construct full path for subdirectory
824 let subdir_path = if current_path.is_empty() {
825 name.clone()
826 } else {
827 format!("{}/{}", current_path, name)
828 };
829 let (subdir, sub_total, sub_reused) = build_directory(agent, &path, existing_blobs, subdir_path, ignore_matcher, progress).await?;
830 dir_entries.push(Entry::new()
831 .name(CowStr::from(name))
832 .node(EntryNode::Directory(Box::new(subdir)))
833 .build());
834 total_files += sub_total;
835 reused_count += sub_reused;
836 }
837
838 // Combine file and directory entries
839 let mut entries = file_entries;
840 entries.extend(dir_entries);
841
842 let directory = Directory::new()
843 .r#type(CowStr::from("directory"))
844 .entries(entries)
845 .build();
846
847 Ok((directory, total_files, reused_count))
848 })
849}
850
851/// Process a single file: gzip -> base64 -> upload blob (or reuse existing)
852/// Returns (File, reused: bool)
853/// file_path_key is the full path from the site root (e.g., "config/file.json") for blob map lookup
854///
855/// Special handling: _redirects files are NOT compressed (uploaded as-is)
856async fn process_file(
857 agent: &Agent<impl jacquard::client::AgentSession + IdentityResolver>,
858 file_path: &Path,
859 file_path_key: &str,
860 existing_blobs: &HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>,
861 progress: &ProgressBar,
862) -> miette::Result<(File<'static>, bool)>
863{
864 // Read file
865 let file_data = std::fs::read(file_path).into_diagnostic()?;
866
867 // Detect original MIME type
868 let original_mime = mime_guess::from_path(file_path)
869 .first_or_octet_stream()
870 .to_string();
871
872 // Check if this is a _redirects file (don't compress it)
873 let is_redirects_file = file_path.file_name()
874 .and_then(|n| n.to_str())
875 .map(|n| n == "_redirects")
876 .unwrap_or(false);
877
878 let (upload_bytes, encoding, is_base64) = if is_redirects_file {
879 // Don't compress _redirects - upload as-is
880 (file_data.clone(), None, false)
881 } else {
882 // Gzip compress
883 let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
884 encoder.write_all(&file_data).into_diagnostic()?;
885 let gzipped = encoder.finish().into_diagnostic()?;
886
887 // Base64 encode the gzipped data
888 let base64_bytes = base64::prelude::BASE64_STANDARD.encode(&gzipped).into_bytes();
889 (base64_bytes, Some("gzip"), true)
890 };
891
892 // Compute CID for this file
893 let file_cid = cid::compute_cid(&upload_bytes);
894
895 // Check if we have an existing blob with the same CID
896 let existing_blob = existing_blobs.get(file_path_key);
897
898 if let Some((existing_blob_ref, existing_cid)) = existing_blob {
899 if existing_cid == &file_cid {
900 // CIDs match - reuse existing blob
901 progress.set_message(format!("✓ Reused {}", file_path_key));
902 let mut file_builder = File::new()
903 .r#type(CowStr::from("file"))
904 .blob(existing_blob_ref.clone())
905 .mime_type(CowStr::from(original_mime));
906
907 if let Some(enc) = encoding {
908 file_builder = file_builder.encoding(CowStr::from(enc));
909 }
910 if is_base64 {
911 file_builder = file_builder.base64(true);
912 }
913
914 return Ok((file_builder.build(), true));
915 }
916 }
917
918 // File is new or changed - upload it
919 let mime_type = if is_redirects_file {
920 MimeType::new_static("text/plain")
921 } else {
922 MimeType::new_static("application/octet-stream")
923 };
924
925 // Format file size nicely
926 let size_str = if upload_bytes.len() < 1024 {
927 format!("{} B", upload_bytes.len())
928 } else if upload_bytes.len() < 1024 * 1024 {
929 format!("{:.1} KB", upload_bytes.len() as f64 / 1024.0)
930 } else {
931 format!("{:.1} MB", upload_bytes.len() as f64 / (1024.0 * 1024.0))
932 };
933
934 progress.set_message(format!("↑ Uploading {} ({})", file_path_key, size_str));
935 let blob = agent.upload_blob(upload_bytes, mime_type).await?;
936 progress.set_message(format!("✓ Uploaded {}", file_path_key));
937
938 let mut file_builder = File::new()
939 .r#type(CowStr::from("file"))
940 .blob(blob)
941 .mime_type(CowStr::from(original_mime));
942
943 if let Some(enc) = encoding {
944 file_builder = file_builder.encoding(CowStr::from(enc));
945 }
946 if is_base64 {
947 file_builder = file_builder.base64(true);
948 }
949
950 Ok((file_builder.build(), false))
951}
952
953/// Convert fs::Directory to subfs::Directory
954/// They have the same structure, but different types
955fn convert_fs_dir_to_subfs_dir(fs_dir: place_wisp::fs::Directory<'static>) -> place_wisp::subfs::Directory<'static> {
956 use place_wisp::subfs::{Directory as SubfsDirectory, Entry as SubfsEntry, EntryNode as SubfsEntryNode, File as SubfsFile};
957
958 let subfs_entries: Vec<SubfsEntry> = fs_dir.entries.into_iter().map(|entry| {
959 let node = match entry.node {
960 place_wisp::fs::EntryNode::File(file) => {
961 SubfsEntryNode::File(Box::new(SubfsFile::new()
962 .r#type(file.r#type)
963 .blob(file.blob)
964 .encoding(file.encoding)
965 .mime_type(file.mime_type)
966 .base64(file.base64)
967 .build()))
968 }
969 place_wisp::fs::EntryNode::Directory(dir) => {
970 SubfsEntryNode::Directory(Box::new(convert_fs_dir_to_subfs_dir(*dir)))
971 }
972 place_wisp::fs::EntryNode::Subfs(subfs) => {
973 // Nested subfs in the directory we're converting
974 // Note: subfs::Subfs doesn't have the 'flat' field - that's only in fs::Subfs
975 SubfsEntryNode::Subfs(Box::new(place_wisp::subfs::Subfs::new()
976 .r#type(subfs.r#type)
977 .subject(subfs.subject)
978 .build()))
979 }
980 place_wisp::fs::EntryNode::Unknown(unknown) => {
981 SubfsEntryNode::Unknown(unknown)
982 }
983 };
984
985 SubfsEntry::new()
986 .name(entry.name)
987 .node(node)
988 .build()
989 }).collect();
990
991 SubfsDirectory::new()
992 .r#type(fs_dir.r#type)
993 .entries(subfs_entries)
994 .build()
995}
996