Monorepo for wisp.place — a static site hosting service built on top of the AT Protocol.
wisp.place
1import type { BlobRef } from "@atproto/api";
2import type { Record, Directory, File, Entry } from "../lexicons/types/place/wisp/fs";
3import { validateRecord } from "../lexicons/types/place/wisp/fs";
4import { gzipSync } from 'zlib';
5import { CID } from 'multiformats/cid';
6import { sha256 } from 'multiformats/hashes/sha2';
7import * as raw from 'multiformats/codecs/raw';
8import { createHash } from 'crypto';
9import * as mf from 'multiformats';
10
/**
 * A file received from the client before its content is uploaded as a blob.
 */
export interface UploadedFile {
  /** Path of the file as uploaded; may include a leading base-folder segment */
  name: string;
  /** Raw file bytes */
  content: Buffer;
  /** MIME type of `content` as currently stored */
  mimeType: string;
  /** Size in bytes — presumably `content.length`; confirm at call sites */
  size: number;
  /** True when `content` has already been gzip-compressed */
  compressed?: boolean;
  /** True when `content` has been base64-encoded */
  base64Encoded?: boolean;
  /** MIME type before any transformation (compression/encoding) was applied */
  originalMimeType?: string;
}
20
/**
 * Result of uploading one file's content as a blob.
 * The optional fields are copied onto the file node by `updateFileBlobs`.
 */
export interface FileUploadResult {
  /** Content hash — presumably the blob CID (see `computeCID`); confirm at call sites */
  hash: string;
  /** Reference to the uploaded blob */
  blobRef: BlobRef;
  /** Set when the stored bytes are gzip-compressed */
  encoding?: 'gzip';
  /** Original MIME type to serve the file with */
  mimeType?: string;
  /** Set when the stored bytes are base64-encoded */
  base64?: boolean;
}
28
/**
 * Directory tree assembled from uploaded files, plus the number of file
 * nodes it contains.
 */
export interface ProcessedDirectory {
  /** Root directory node of the assembled tree */
  directory: Directory;
  /** Total count of file nodes created in the tree */
  fileCount: number;
}
33
34/**
35 * Determine if a file should be gzip compressed based on its MIME type and filename
36 */
37export function shouldCompressFile(mimeType: string, fileName?: string): boolean {
38 // Never compress _redirects file - it needs to be plain text for the hosting service
39 if (fileName && (fileName.endsWith('/_redirects') || fileName === '_redirects')) {
40 return false;
41 }
42
43 // Compress text-based files and uncompressed audio formats
44 const compressibleTypes = [
45 'text/html',
46 'text/css',
47 'text/javascript',
48 'application/javascript',
49 'application/json',
50 'image/svg+xml',
51 'text/xml',
52 'application/xml',
53 'text/plain',
54 'application/x-javascript',
55 // Uncompressed audio formats (WAV, AIFF, etc.)
56 'audio/wav',
57 'audio/wave',
58 'audio/x-wav',
59 'audio/aiff',
60 'audio/x-aiff'
61 ];
62
63 // Check if mime type starts with any compressible type
64 return compressibleTypes.some(type => mimeType.startsWith(type));
65}
66
67/**
68 * Compress a file using gzip with deterministic output
69 */
70export function compressFile(content: Buffer): Buffer {
71 return gzipSync(content, {
72 level: 9
73 });
74}
75
/**
 * Process uploaded files into a nested place.wisp.fs directory structure.
 *
 * The first path segment of any slashed path is stripped (treated as the
 * client's base folder), `.git` entries are skipped, and file nodes are
 * created with `blob: undefined` placeholders — callers fill blobs in
 * after upload (see `updateFileBlobs`).
 *
 * @param files - files selected for upload; null/invalid entries are skipped
 * @returns the root directory node and the number of file nodes created
 */
export function processUploadedFiles(files: UploadedFile[]): ProcessedDirectory {
  const entries: Entry[] = [];
  let fileCount = 0;

  // Group files by directory
  const directoryMap = new Map<string, UploadedFile[]>();

  for (const file of files) {
    // Skip undefined/null files (defensive)
    if (!file || !file.name) {
      console.error('Skipping undefined or invalid file in processUploadedFiles');
      continue;
    }

    // Remove any base folder name from the path.
    // Only paths containing a '/' are affected; a bare root-level name
    // like "index.html" passes through unchanged.
    const normalizedPath = file.name.replace(/^[^\/]*\//, '');

    // Skip files in .git directories
    if (normalizedPath.startsWith('.git/') || normalizedPath === '.git') {
      continue;
    }

    const parts = normalizedPath.split('/');

    if (parts.length === 1) {
      // Root level file
      entries.push({
        name: parts[0],
        node: {
          $type: 'place.wisp.fs#file' as const,
          type: 'file' as const,
          blob: undefined as any // Will be filled after upload
        }
      });
      fileCount++;
    } else {
      // File in subdirectory: bucket it under its parent directory path so
      // all files of one directory are materialized together below.
      const dirPath = parts.slice(0, -1).join('/');
      if (!directoryMap.has(dirPath)) {
        directoryMap.set(dirPath, []);
      }
      directoryMap.get(dirPath)!.push({
        ...file,
        name: normalizedPath
      });
    }
  }

  // Process subdirectories
  for (const [dirPath, dirFiles] of directoryMap) {
    const dirEntries: Entry[] = [];

    // Create placeholder file nodes for every file in this directory.
    for (const file of dirFiles) {
      const fileName = file.name.split('/').pop()!;
      dirEntries.push({
        name: fileName,
        node: {
          $type: 'place.wisp.fs#file' as const,
          type: 'file' as const,
          blob: undefined as any // Will be filled after upload
        }
      });
      fileCount++;
    }

    // Build nested directory structure: walk/create one directory node per
    // path segment, attaching dirEntries at the deepest level.
    const pathParts = dirPath.split('/');
    let currentEntries = entries;

    for (let i = 0; i < pathParts.length; i++) {
      const part = pathParts[i];
      const isLast = i === pathParts.length - 1;

      let existingEntry = currentEntries.find(e => e.name === part);

      if (!existingEntry) {
        const newDir = {
          $type: 'place.wisp.fs#directory' as const,
          type: 'directory' as const,
          entries: isLast ? dirEntries : []
        };

        existingEntry = {
          name: part,
          node: newDir
        };
        currentEntries.push(existingEntry);
      } else if ('entries' in existingEntry.node && isLast) {
        // Directory already exists (created earlier for a deeper path):
        // merge this directory's files into it.
        (existingEntry.node as any).entries.push(...dirEntries);
      }

      // Descend into the directory for the next segment.
      // NOTE(review): if an entry with this name exists but is a file (a
      // file/directory name collision), currentEntries is not advanced and
      // deeper segments attach to the wrong level — assumed not to occur
      // with well-formed uploads; confirm upstream validation.
      if (existingEntry && 'entries' in existingEntry.node) {
        currentEntries = (existingEntry.node as any).entries;
      }
    }
  }

  const result = {
    directory: {
      $type: 'place.wisp.fs#directory' as const,
      type: 'directory' as const,
      entries
    },
    fileCount
  };

  return result;
}
187
188/**
189 * Create the manifest record for a site
190 */
191export function createManifest(
192 siteName: string,
193 root: Directory,
194 fileCount: number
195): Record {
196 const manifest = {
197 $type: 'place.wisp.fs' as const,
198 site: siteName,
199 root,
200 fileCount,
201 createdAt: new Date().toISOString()
202 };
203
204 // Validate the manifest before returning
205 const validationResult = validateRecord(manifest);
206 if (!validationResult.success) {
207 throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
208 }
209
210 return manifest;
211}
212
/**
 * Update file blobs in directory structure after upload.
 *
 * Walks the tree and, for every file node, looks up its upload result by
 * matching the file's full path against `filePaths` (index-aligned with
 * `uploadResults`). Files without a matching upload — or not present in
 * `successfulPaths`, when given — are removed from the returned tree.
 *
 * @param directory - tree produced by `processUploadedFiles`
 * @param uploadResults - upload results, index-aligned with `filePaths`
 * @param filePaths - original upload paths (may include a base folder prefix)
 * @param currentPath - path prefix of `directory` within the tree (recursion state)
 * @param successfulPaths - when provided, only these full paths are kept
 * @returns a new Directory with blobs filled in and failed files removed
 */
export function updateFileBlobs(
  directory: Directory,
  uploadResults: FileUploadResult[],
  filePaths: string[],
  currentPath: string = '',
  successfulPaths?: Set<string>
): Directory {
  const updatedEntries = directory.entries.map(entry => {
    if ('type' in entry.node && entry.node.type === 'file') {
      // Build the full path for this file
      const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;

      // If successfulPaths is provided, skip files that weren't successfully uploaded
      if (successfulPaths && !successfulPaths.has(fullPath)) {
        return null; // Filter out failed files
      }

      // Find exact match in filePaths (need to handle normalized paths).
      // The same base-folder-stripping regex as processUploadedFiles keeps
      // both sides comparable.
      const fileIndex = filePaths.findIndex((path) => {
        // Normalize both paths by removing leading base folder
        const normalizedUploadPath = path.replace(/^[^\/]*\//, '');
        const normalizedEntryPath = fullPath;
        return normalizedUploadPath === normalizedEntryPath || path === fullPath;
      });

      if (fileIndex !== -1 && uploadResults[fileIndex]) {
        const result = uploadResults[fileIndex];
        const blobRef = result.blobRef;

        // Rebuild the file node with the blob plus any optional metadata
        // (encoding/mimeType/base64) recorded at upload time.
        return {
          ...entry,
          node: {
            $type: 'place.wisp.fs#file' as const,
            type: 'file' as const,
            blob: blobRef,
            ...(result.encoding && { encoding: result.encoding }),
            ...(result.mimeType && { mimeType: result.mimeType }),
            ...(result.base64 && { base64: result.base64 })
          }
        };
      } else {
        console.error(`Could not find blob for file: ${fullPath}`);
        return null; // Filter out files without blobs
      }
    } else if ('type' in entry.node && entry.node.type === 'directory') {
      // Recurse into subdirectories, extending the path prefix.
      const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
      return {
        ...entry,
        node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath, successfulPaths)
      };
    }
    // Other node kinds (e.g. subfs) pass through unchanged.
    return entry;
  }).filter(entry => entry !== null) as Entry[]; // Remove null entries (failed files)

  const result = {
    $type: 'place.wisp.fs#directory' as const,
    type: 'directory' as const,
    entries: updatedEntries
  };

  return result;
}
280
281/**
282 * Compute CID (Content Identifier) for blob content
283 * Uses the same algorithm as AT Protocol: CIDv1 with raw codec and SHA-256
284 * Based on @atproto/common/src/ipld.ts sha256RawToCid implementation
285 */
286export function computeCID(content: Buffer): string {
287 // Use node crypto to compute sha256 hash (same as AT Protocol)
288 const hash = createHash('sha256').update(content).digest();
289 // Create digest object from hash bytes
290 const digest = mf.digest.create(sha256.code, hash);
291 // Create CIDv1 with raw codec
292 const cid = CID.createV1(raw.code, digest);
293 return cid.toString();
294}
295
296/**
297 * Extract blob information from a directory tree
298 * Returns a map of file paths to their blob refs and CIDs
299 */
300export function extractBlobMap(
301 directory: Directory,
302 currentPath: string = ''
303): Map<string, { blobRef: BlobRef; cid: string }> {
304 const blobMap = new Map<string, { blobRef: BlobRef; cid: string }>();
305
306 for (const entry of directory.entries) {
307 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
308
309 if ('type' in entry.node && entry.node.type === 'file') {
310 const fileNode = entry.node as File;
311 // AT Protocol SDK returns BlobRef class instances, not plain objects
312 // The ref is a CID instance that can be converted to string
313 if (fileNode.blob && fileNode.blob.ref) {
314 const cidString = fileNode.blob.ref.toString();
315 blobMap.set(fullPath, {
316 blobRef: fileNode.blob,
317 cid: cidString
318 });
319 }
320 } else if ('type' in entry.node && entry.node.type === 'directory') {
321 const subMap = extractBlobMap(entry.node as Directory, fullPath);
322 subMap.forEach((value, key) => blobMap.set(key, value));
323 }
324 // Skip subfs nodes - they don't contain blobs in the main tree
325 }
326
327 return blobMap;
328}
329
330/**
331 * Extract all subfs URIs from a directory tree with their mount paths
332 */
333export function extractSubfsUris(
334 directory: Directory,
335 currentPath: string = ''
336): Array<{ uri: string; path: string }> {
337 const uris: Array<{ uri: string; path: string }> = [];
338
339 for (const entry of directory.entries) {
340 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
341
342 if ('type' in entry.node) {
343 if (entry.node.type === 'subfs') {
344 // Subfs node with subject URI
345 const subfsNode = entry.node as any;
346 if (subfsNode.subject) {
347 uris.push({ uri: subfsNode.subject, path: fullPath });
348 }
349 } else if (entry.node.type === 'directory') {
350 // Recursively search subdirectories
351 const subUris = extractSubfsUris(entry.node as Directory, fullPath);
352 uris.push(...subUris);
353 }
354 }
355 }
356
357 return uris;
358}
359
360/**
361 * Estimate the JSON size of a directory tree
362 */
363export function estimateDirectorySize(directory: Directory): number {
364 return JSON.stringify(directory).length;
365}
366
367/**
368 * Count files in a directory tree
369 */
370export function countFilesInDirectory(directory: Directory): number {
371 let count = 0;
372 for (const entry of directory.entries) {
373 if ('type' in entry.node && entry.node.type === 'file') {
374 count++;
375 } else if ('type' in entry.node && entry.node.type === 'directory') {
376 count += countFilesInDirectory(entry.node as Directory);
377 }
378 }
379 return count;
380}
381
382/**
383 * Find all directories in a tree with their paths and sizes
384 */
385export function findLargeDirectories(directory: Directory, currentPath: string = ''): Array<{
386 path: string;
387 directory: Directory;
388 size: number;
389 fileCount: number;
390}> {
391 const result: Array<{ path: string; directory: Directory; size: number; fileCount: number }> = [];
392
393 for (const entry of directory.entries) {
394 if ('type' in entry.node && entry.node.type === 'directory') {
395 const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
396 const dir = entry.node as Directory;
397 const size = estimateDirectorySize(dir);
398 const fileCount = countFilesInDirectory(dir);
399
400 result.push({ path: dirPath, directory: dir, size, fileCount });
401
402 // Recursively find subdirectories
403 const subdirs = findLargeDirectories(dir, dirPath);
404 result.push(...subdirs);
405 }
406 }
407
408 return result;
409}
410
411/**
412 * Replace a directory with a subfs node in the tree
413 */
414export function replaceDirectoryWithSubfs(
415 directory: Directory,
416 targetPath: string,
417 subfsUri: string
418): Directory {
419 const pathParts = targetPath.split('/');
420 const targetName = pathParts[pathParts.length - 1];
421 const parentPath = pathParts.slice(0, -1).join('/');
422
423 // If this is a root-level directory
424 if (pathParts.length === 1) {
425 const newEntries = directory.entries.map(entry => {
426 if (entry.name === targetName && 'type' in entry.node && entry.node.type === 'directory') {
427 return {
428 name: entry.name,
429 node: {
430 $type: 'place.wisp.fs#subfs' as const,
431 type: 'subfs' as const,
432 subject: subfsUri,
433 flat: false // Preserve directory structure
434 }
435 };
436 }
437 return entry;
438 });
439
440 return {
441 $type: 'place.wisp.fs#directory' as const,
442 type: 'directory' as const,
443 entries: newEntries
444 };
445 }
446
447 // Recursively navigate to parent directory
448 const newEntries = directory.entries.map(entry => {
449 if ('type' in entry.node && entry.node.type === 'directory') {
450 const entryPath = entry.name;
451 if (parentPath.startsWith(entryPath) || parentPath === entry.name) {
452 const remainingPath = pathParts.slice(1).join('/');
453 return {
454 name: entry.name,
455 node: {
456 ...replaceDirectoryWithSubfs(entry.node as Directory, remainingPath, subfsUri),
457 $type: 'place.wisp.fs#directory' as const
458 }
459 };
460 }
461 }
462 return entry;
463 });
464
465 return {
466 $type: 'place.wisp.fs#directory' as const,
467 type: 'directory' as const,
468 entries: newEntries
469 };
470}