Monorepo for wisp.place — a static site hosting service built on top of the AT Protocol.
wisp.place
1import type { BlobRef } from "@atproto/api";
2import type { Record, Directory, File, Entry } from "../lexicons/types/place/wisp/fs";
3import { validateRecord } from "../lexicons/types/place/wisp/fs";
4import { gzipSync } from 'zlib';
5import { CID } from 'multiformats/cid';
6import { sha256 } from 'multiformats/hashes/sha2';
7import * as raw from 'multiformats/codecs/raw';
8import { createHash } from 'crypto';
9import * as mf from 'multiformats';
10
11export interface UploadedFile {
12 name: string;
13 content: Buffer;
14 mimeType: string;
15 size: number;
16 compressed?: boolean;
17 base64Encoded?: boolean;
18 originalMimeType?: string;
19}
20
21export interface FileUploadResult {
22 hash: string;
23 blobRef: BlobRef;
24 encoding?: 'gzip';
25 mimeType?: string;
26 base64?: boolean;
27}
28
29export interface ProcessedDirectory {
30 directory: Directory;
31 fileCount: number;
32}
33
34/**
35 * Determine if a file should be gzip compressed based on its MIME type
36 */
37export function shouldCompressFile(mimeType: string): boolean {
38 // Compress text-based files and uncompressed audio formats
39 const compressibleTypes = [
40 'text/html',
41 'text/css',
42 'text/javascript',
43 'application/javascript',
44 'application/json',
45 'image/svg+xml',
46 'text/xml',
47 'application/xml',
48 'text/plain',
49 'application/x-javascript',
50 // Uncompressed audio formats (WAV, AIFF, etc.)
51 'audio/wav',
52 'audio/wave',
53 'audio/x-wav',
54 'audio/aiff',
55 'audio/x-aiff'
56 ];
57
58 // Check if mime type starts with any compressible type
59 return compressibleTypes.some(type => mimeType.startsWith(type));
60}
61
62/**
63 * Compress a file using gzip with deterministic output
64 */
65export function compressFile(content: Buffer): Buffer {
66 return gzipSync(content, {
67 level: 9
68 });
69}
70
71/**
72 * Process uploaded files into a directory structure
73 */
74export function processUploadedFiles(files: UploadedFile[]): ProcessedDirectory {
75 const entries: Entry[] = [];
76 let fileCount = 0;
77
78 // Group files by directory
79 const directoryMap = new Map<string, UploadedFile[]>();
80
81 for (const file of files) {
82 // Skip undefined/null files (defensive)
83 if (!file || !file.name) {
84 console.error('Skipping undefined or invalid file in processUploadedFiles');
85 continue;
86 }
87
88 // Remove any base folder name from the path
89 const normalizedPath = file.name.replace(/^[^\/]*\//, '');
90
91 // Skip files in .git directories
92 if (normalizedPath.startsWith('.git/') || normalizedPath === '.git') {
93 continue;
94 }
95
96 const parts = normalizedPath.split('/');
97
98 if (parts.length === 1) {
99 // Root level file
100 entries.push({
101 name: parts[0],
102 node: {
103 $type: 'place.wisp.fs#file' as const,
104 type: 'file' as const,
105 blob: undefined as any // Will be filled after upload
106 }
107 });
108 fileCount++;
109 } else {
110 // File in subdirectory
111 const dirPath = parts.slice(0, -1).join('/');
112 if (!directoryMap.has(dirPath)) {
113 directoryMap.set(dirPath, []);
114 }
115 directoryMap.get(dirPath)!.push({
116 ...file,
117 name: normalizedPath
118 });
119 }
120 }
121
122 // Process subdirectories
123 for (const [dirPath, dirFiles] of directoryMap) {
124 const dirEntries: Entry[] = [];
125
126 for (const file of dirFiles) {
127 const fileName = file.name.split('/').pop()!;
128 dirEntries.push({
129 name: fileName,
130 node: {
131 $type: 'place.wisp.fs#file' as const,
132 type: 'file' as const,
133 blob: undefined as any // Will be filled after upload
134 }
135 });
136 fileCount++;
137 }
138
139 // Build nested directory structure
140 const pathParts = dirPath.split('/');
141 let currentEntries = entries;
142
143 for (let i = 0; i < pathParts.length; i++) {
144 const part = pathParts[i];
145 const isLast = i === pathParts.length - 1;
146
147 let existingEntry = currentEntries.find(e => e.name === part);
148
149 if (!existingEntry) {
150 const newDir = {
151 $type: 'place.wisp.fs#directory' as const,
152 type: 'directory' as const,
153 entries: isLast ? dirEntries : []
154 };
155
156 existingEntry = {
157 name: part,
158 node: newDir
159 };
160 currentEntries.push(existingEntry);
161 } else if ('entries' in existingEntry.node && isLast) {
162 (existingEntry.node as any).entries.push(...dirEntries);
163 }
164
165 if (existingEntry && 'entries' in existingEntry.node) {
166 currentEntries = (existingEntry.node as any).entries;
167 }
168 }
169 }
170
171 const result = {
172 directory: {
173 $type: 'place.wisp.fs#directory' as const,
174 type: 'directory' as const,
175 entries
176 },
177 fileCount
178 };
179
180 return result;
181}
182
183/**
184 * Create the manifest record for a site
185 */
186export function createManifest(
187 siteName: string,
188 root: Directory,
189 fileCount: number
190): Record {
191 const manifest = {
192 $type: 'place.wisp.fs' as const,
193 site: siteName,
194 root,
195 fileCount,
196 createdAt: new Date().toISOString()
197 };
198
199 // Validate the manifest before returning
200 const validationResult = validateRecord(manifest);
201 if (!validationResult.success) {
202 throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
203 }
204
205 return manifest;
206}
207
208/**
209 * Update file blobs in directory structure after upload
210 * Uses path-based matching to correctly match files in nested directories
211 * Filters out files that were not successfully uploaded
212 */
213export function updateFileBlobs(
214 directory: Directory,
215 uploadResults: FileUploadResult[],
216 filePaths: string[],
217 currentPath: string = '',
218 successfulPaths?: Set<string>
219): Directory {
220 const updatedEntries = directory.entries.map(entry => {
221 if ('type' in entry.node && entry.node.type === 'file') {
222 // Build the full path for this file
223 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
224
225 // If successfulPaths is provided, skip files that weren't successfully uploaded
226 if (successfulPaths && !successfulPaths.has(fullPath)) {
227 return null; // Filter out failed files
228 }
229
230 // Find exact match in filePaths (need to handle normalized paths)
231 const fileIndex = filePaths.findIndex((path) => {
232 // Normalize both paths by removing leading base folder
233 const normalizedUploadPath = path.replace(/^[^\/]*\//, '');
234 const normalizedEntryPath = fullPath;
235 return normalizedUploadPath === normalizedEntryPath || path === fullPath;
236 });
237
238 if (fileIndex !== -1 && uploadResults[fileIndex]) {
239 const result = uploadResults[fileIndex];
240 const blobRef = result.blobRef;
241
242 return {
243 ...entry,
244 node: {
245 $type: 'place.wisp.fs#file' as const,
246 type: 'file' as const,
247 blob: blobRef,
248 ...(result.encoding && { encoding: result.encoding }),
249 ...(result.mimeType && { mimeType: result.mimeType }),
250 ...(result.base64 && { base64: result.base64 })
251 }
252 };
253 } else {
254 console.error(`Could not find blob for file: ${fullPath}`);
255 return null; // Filter out files without blobs
256 }
257 } else if ('type' in entry.node && entry.node.type === 'directory') {
258 const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
259 return {
260 ...entry,
261 node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath, successfulPaths)
262 };
263 }
264 return entry;
265 }).filter(entry => entry !== null) as Entry[]; // Remove null entries (failed files)
266
267 const result = {
268 $type: 'place.wisp.fs#directory' as const,
269 type: 'directory' as const,
270 entries: updatedEntries
271 };
272
273 return result;
274}
275
276/**
277 * Compute CID (Content Identifier) for blob content
278 * Uses the same algorithm as AT Protocol: CIDv1 with raw codec and SHA-256
279 * Based on @atproto/common/src/ipld.ts sha256RawToCid implementation
280 */
281export function computeCID(content: Buffer): string {
282 // Use node crypto to compute sha256 hash (same as AT Protocol)
283 const hash = createHash('sha256').update(content).digest();
284 // Create digest object from hash bytes
285 const digest = mf.digest.create(sha256.code, hash);
286 // Create CIDv1 with raw codec
287 const cid = CID.createV1(raw.code, digest);
288 return cid.toString();
289}
290
291/**
292 * Extract blob information from a directory tree
293 * Returns a map of file paths to their blob refs and CIDs
294 */
295export function extractBlobMap(
296 directory: Directory,
297 currentPath: string = ''
298): Map<string, { blobRef: BlobRef; cid: string }> {
299 const blobMap = new Map<string, { blobRef: BlobRef; cid: string }>();
300
301 for (const entry of directory.entries) {
302 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
303
304 if ('type' in entry.node && entry.node.type === 'file') {
305 const fileNode = entry.node as File;
306 // AT Protocol SDK returns BlobRef class instances, not plain objects
307 // The ref is a CID instance that can be converted to string
308 if (fileNode.blob && fileNode.blob.ref) {
309 const cidString = fileNode.blob.ref.toString();
310 blobMap.set(fullPath, {
311 blobRef: fileNode.blob,
312 cid: cidString
313 });
314 }
315 } else if ('type' in entry.node && entry.node.type === 'directory') {
316 const subMap = extractBlobMap(entry.node as Directory, fullPath);
317 subMap.forEach((value, key) => blobMap.set(key, value));
318 }
319 // Skip subfs nodes - they don't contain blobs in the main tree
320 }
321
322 return blobMap;
323}
324
325/**
326 * Extract all subfs URIs from a directory tree with their mount paths
327 */
328export function extractSubfsUris(
329 directory: Directory,
330 currentPath: string = ''
331): Array<{ uri: string; path: string }> {
332 const uris: Array<{ uri: string; path: string }> = [];
333
334 for (const entry of directory.entries) {
335 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
336
337 if ('type' in entry.node) {
338 if (entry.node.type === 'subfs') {
339 // Subfs node with subject URI
340 const subfsNode = entry.node as any;
341 if (subfsNode.subject) {
342 uris.push({ uri: subfsNode.subject, path: fullPath });
343 }
344 } else if (entry.node.type === 'directory') {
345 // Recursively search subdirectories
346 const subUris = extractSubfsUris(entry.node as Directory, fullPath);
347 uris.push(...subUris);
348 }
349 }
350 }
351
352 return uris;
353}
354
355/**
356 * Estimate the JSON size of a directory tree
357 */
358export function estimateDirectorySize(directory: Directory): number {
359 return JSON.stringify(directory).length;
360}
361
362/**
363 * Count files in a directory tree
364 */
365export function countFilesInDirectory(directory: Directory): number {
366 let count = 0;
367 for (const entry of directory.entries) {
368 if ('type' in entry.node && entry.node.type === 'file') {
369 count++;
370 } else if ('type' in entry.node && entry.node.type === 'directory') {
371 count += countFilesInDirectory(entry.node as Directory);
372 }
373 }
374 return count;
375}
376
377/**
378 * Find all directories in a tree with their paths and sizes
379 */
380export function findLargeDirectories(directory: Directory, currentPath: string = ''): Array<{
381 path: string;
382 directory: Directory;
383 size: number;
384 fileCount: number;
385}> {
386 const result: Array<{ path: string; directory: Directory; size: number; fileCount: number }> = [];
387
388 for (const entry of directory.entries) {
389 if ('type' in entry.node && entry.node.type === 'directory') {
390 const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
391 const dir = entry.node as Directory;
392 const size = estimateDirectorySize(dir);
393 const fileCount = countFilesInDirectory(dir);
394
395 result.push({ path: dirPath, directory: dir, size, fileCount });
396
397 // Recursively find subdirectories
398 const subdirs = findLargeDirectories(dir, dirPath);
399 result.push(...subdirs);
400 }
401 }
402
403 return result;
404}
405
406/**
407 * Replace a directory with a subfs node in the tree
408 */
409export function replaceDirectoryWithSubfs(
410 directory: Directory,
411 targetPath: string,
412 subfsUri: string
413): Directory {
414 const pathParts = targetPath.split('/');
415 const targetName = pathParts[pathParts.length - 1];
416 const parentPath = pathParts.slice(0, -1).join('/');
417
418 // If this is a root-level directory
419 if (pathParts.length === 1) {
420 const newEntries = directory.entries.map(entry => {
421 if (entry.name === targetName && 'type' in entry.node && entry.node.type === 'directory') {
422 return {
423 name: entry.name,
424 node: {
425 $type: 'place.wisp.fs#subfs' as const,
426 type: 'subfs' as const,
427 subject: subfsUri,
428 flat: false // Preserve directory structure
429 }
430 };
431 }
432 return entry;
433 });
434
435 return {
436 $type: 'place.wisp.fs#directory' as const,
437 type: 'directory' as const,
438 entries: newEntries
439 };
440 }
441
442 // Recursively navigate to parent directory
443 const newEntries = directory.entries.map(entry => {
444 if ('type' in entry.node && entry.node.type === 'directory') {
445 const entryPath = entry.name;
446 if (parentPath.startsWith(entryPath) || parentPath === entry.name) {
447 const remainingPath = pathParts.slice(1).join('/');
448 return {
449 name: entry.name,
450 node: {
451 ...replaceDirectoryWithSubfs(entry.node as Directory, remainingPath, subfsUri),
452 $type: 'place.wisp.fs#directory' as const
453 }
454 };
455 }
456 }
457 return entry;
458 });
459
460 return {
461 $type: 'place.wisp.fs#directory' as const,
462 type: 'directory' as const,
463 entries: newEntries
464 };
465}