Monorepo for wisp.place — a static site hosting service built on top of the AT Protocol.
wisp.place
1import type { BlobRef } from "@atproto/api";
2import type { Record, Directory, File, Entry } from "../lexicons/types/place/wisp/fs";
3import { validateRecord } from "../lexicons/types/place/wisp/fs";
4import { gzipSync } from 'zlib';
5import { CID } from 'multiformats/cid';
6import { sha256 } from 'multiformats/hashes/sha2';
7import * as raw from 'multiformats/codecs/raw';
8import { createHash } from 'crypto';
9import * as mf from 'multiformats';
10
/**
 * A file received from the client, prior to blob upload.
 */
export interface UploadedFile {
  name: string; // upload path; may include a leading base folder that gets stripped
  content: Buffer; // raw file bytes (possibly already gzip-compressed, see `compressed`)
  mimeType: string;
  size: number; // byte count — NOTE(review): unclear whether pre- or post-compression; confirm at call sites
  compressed?: boolean; // true when `content` holds gzip-compressed bytes
  originalMimeType?: string; // MIME type before compression, when `compressed` is set
}
19
/**
 * Result of uploading one file's blob; consumed by updateFileBlobs to
 * attach refs and optional metadata to the matching file node.
 */
export interface FileUploadResult {
  hash: string; // content hash identifying the file
  blobRef: BlobRef; // blob reference returned by the upload
  encoding?: 'gzip'; // set when the stored blob is gzip-encoded
  mimeType?: string; // original MIME type to record on the file node
  base64?: boolean; // NOTE(review): presumably marks base64-wrapped content — confirm against uploader
}
27
/**
 * A directory tree built from uploaded files, plus how many file nodes it holds.
 */
export interface ProcessedDirectory {
  directory: Directory; // root of the place.wisp.fs tree (blob refs not yet attached)
  fileCount: number; // total number of file nodes created in `directory`
}
32
33/**
34 * Determine if a file should be gzip compressed based on its MIME type
35 */
36export function shouldCompressFile(mimeType: string): boolean {
37 // Compress text-based files
38 const compressibleTypes = [
39 'text/html',
40 'text/css',
41 'text/javascript',
42 'application/javascript',
43 'application/json',
44 'image/svg+xml',
45 'text/xml',
46 'application/xml',
47 'text/plain',
48 'application/x-javascript'
49 ];
50
51 // Check if mime type starts with any compressible type
52 return compressibleTypes.some(type => mimeType.startsWith(type));
53}
54
55/**
56 * Compress a file using gzip with deterministic output
57 */
58export function compressFile(content: Buffer): Buffer {
59 return gzipSync(content, {
60 level: 9
61 });
62}
63
/**
 * Process uploaded files into a directory structure.
 *
 * Builds a place.wisp.fs directory tree from a flat list of uploaded files.
 * File nodes are created with `blob: undefined`; the real BlobRefs are
 * attached later by `updateFileBlobs` after the blobs have been uploaded.
 *
 * @param files - flat upload list; each `name` may carry a base folder prefix
 * @returns the root directory tree plus the number of file nodes created
 */
export function processUploadedFiles(files: UploadedFile[]): ProcessedDirectory {
  const entries: Entry[] = [];
  let fileCount = 0;

  // Group files by directory
  const directoryMap = new Map<string, UploadedFile[]>();

  for (const file of files) {
    // Skip undefined/null files (defensive)
    if (!file || !file.name) {
      console.error('Skipping undefined or invalid file in processUploadedFiles');
      continue;
    }

    // Remove any base folder name from the path
    // (strips up to and including the first '/'; a path with no '/' is kept
    // as-is, so root-level files keep their names)
    const normalizedPath = file.name.replace(/^[^\/]*\//, '');

    // Skip files in .git directories
    if (normalizedPath.startsWith('.git/') || normalizedPath === '.git') {
      continue;
    }

    const parts = normalizedPath.split('/');

    if (parts.length === 1) {
      // Root level file
      entries.push({
        name: parts[0],
        node: {
          $type: 'place.wisp.fs#file' as const,
          type: 'file' as const,
          blob: undefined as any // Will be filled after upload
        }
      });
      fileCount++;
    } else {
      // File in subdirectory — bucket under its parent directory path
      const dirPath = parts.slice(0, -1).join('/');
      if (!directoryMap.has(dirPath)) {
        directoryMap.set(dirPath, []);
      }
      directoryMap.get(dirPath)!.push({
        ...file,
        name: normalizedPath
      });
    }
  }

  // Process subdirectories
  for (const [dirPath, dirFiles] of directoryMap) {
    const dirEntries: Entry[] = [];

    // One placeholder file node per file in this directory
    for (const file of dirFiles) {
      const fileName = file.name.split('/').pop()!;
      dirEntries.push({
        name: fileName,
        node: {
          $type: 'place.wisp.fs#file' as const,
          type: 'file' as const,
          blob: undefined as any // Will be filled after upload
        }
      });
      fileCount++;
    }

    // Build nested directory structure: walk/create one directory node per
    // path segment, attaching dirEntries at the deepest segment
    const pathParts = dirPath.split('/');
    let currentEntries = entries;

    for (let i = 0; i < pathParts.length; i++) {
      const part = pathParts[i];
      const isLast = i === pathParts.length - 1;

      let existingEntry = currentEntries.find(e => e.name === part);

      if (!existingEntry) {
        // New directory; only the final segment receives the file entries
        const newDir = {
          $type: 'place.wisp.fs#directory' as const,
          type: 'directory' as const,
          entries: isLast ? dirEntries : []
        };

        existingEntry = {
          name: part,
          node: newDir
        };
        currentEntries.push(existingEntry);
      } else if ('entries' in existingEntry.node && isLast) {
        // Directory already created while handling another path:
        // append this batch of files to it
        (existingEntry.node as any).entries.push(...dirEntries);
      }

      // Descend for the next segment.
      // NOTE(review): if a FILE entry shares this segment's name, the guard
      // fails and we stay at the current level — confirm this collision is
      // acceptable for the expected inputs.
      if (existingEntry && 'entries' in existingEntry.node) {
        currentEntries = (existingEntry.node as any).entries;
      }
    }
  }

  const result = {
    directory: {
      $type: 'place.wisp.fs#directory' as const,
      type: 'directory' as const,
      entries
    },
    fileCount
  };

  return result;
}
175
176/**
177 * Create the manifest record for a site
178 */
179export function createManifest(
180 siteName: string,
181 root: Directory,
182 fileCount: number
183): Record {
184 const manifest = {
185 $type: 'place.wisp.fs' as const,
186 site: siteName,
187 root,
188 fileCount,
189 createdAt: new Date().toISOString()
190 };
191
192 // Validate the manifest before returning
193 const validationResult = validateRecord(manifest);
194 if (!validationResult.success) {
195 throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
196 }
197
198 return manifest;
199}
200
/**
 * Update file blobs in directory structure after upload.
 * Uses path-based matching to correctly match files in nested directories.
 *
 * NOTE(review): `uploadResults` and `filePaths` are treated as parallel
 * arrays — uploadResults[i] belongs to filePaths[i]. Confirm callers
 * preserve this ordering.
 *
 * Returns a new tree; the input directory is not mutated.
 */
export function updateFileBlobs(
  directory: Directory,
  uploadResults: FileUploadResult[],
  filePaths: string[],
  currentPath: string = ''
): Directory {
  const updatedEntries = directory.entries.map(entry => {
    if ('type' in entry.node && entry.node.type === 'file') {
      // Build the full path for this file
      const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;

      // Find exact match in filePaths (need to handle normalized paths)
      const fileIndex = filePaths.findIndex((path) => {
        // Normalize both paths by removing leading base folder
        // (same regex as processUploadedFiles, keeping the two in sync)
        const normalizedUploadPath = path.replace(/^[^\/]*\//, '');
        const normalizedEntryPath = fullPath;
        return normalizedUploadPath === normalizedEntryPath || path === fullPath;
      });

      if (fileIndex !== -1 && uploadResults[fileIndex]) {
        const result = uploadResults[fileIndex];
        const blobRef = result.blobRef;

        // Rebuild the file node with the real blob ref; optional metadata
        // (encoding/mimeType/base64) is only spread in when present
        return {
          ...entry,
          node: {
            $type: 'place.wisp.fs#file' as const,
            type: 'file' as const,
            blob: blobRef,
            ...(result.encoding && { encoding: result.encoding }),
            ...(result.mimeType && { mimeType: result.mimeType }),
            ...(result.base64 && { base64: result.base64 })
          }
        };
      } else {
        // No matching upload — leave the placeholder node untouched and log
        console.error(`❌ BLOB MATCHING ERROR: Could not find blob for file: ${fullPath}`);
        console.error(`   Available paths:`, filePaths.slice(0, 10), filePaths.length > 10 ? `... and ${filePaths.length - 10} more` : '');
      }
    } else if ('type' in entry.node && entry.node.type === 'directory') {
      // Recurse into subdirectories, extending the path prefix
      const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
      return {
        ...entry,
        node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath)
      };
    }
    return entry;
  }) as Entry[];

  const result = {
    $type: 'place.wisp.fs#directory' as const,
    type: 'directory' as const,
    entries: updatedEntries
  };

  return result;
}
261
262/**
263 * Compute CID (Content Identifier) for blob content
264 * Uses the same algorithm as AT Protocol: CIDv1 with raw codec and SHA-256
265 * Based on @atproto/common/src/ipld.ts sha256RawToCid implementation
266 */
267export function computeCID(content: Buffer): string {
268 // Use node crypto to compute sha256 hash (same as AT Protocol)
269 const hash = createHash('sha256').update(content).digest();
270 // Create digest object from hash bytes
271 const digest = mf.digest.create(sha256.code, hash);
272 // Create CIDv1 with raw codec
273 const cid = CID.createV1(raw.code, digest);
274 return cid.toString();
275}
276
277/**
278 * Extract blob information from a directory tree
279 * Returns a map of file paths to their blob refs and CIDs
280 */
281export function extractBlobMap(
282 directory: Directory,
283 currentPath: string = ''
284): Map<string, { blobRef: BlobRef; cid: string }> {
285 const blobMap = new Map<string, { blobRef: BlobRef; cid: string }>();
286
287 for (const entry of directory.entries) {
288 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
289
290 if ('type' in entry.node && entry.node.type === 'file') {
291 const fileNode = entry.node as File;
292 // AT Protocol SDK returns BlobRef class instances, not plain objects
293 // The ref is a CID instance that can be converted to string
294 if (fileNode.blob && fileNode.blob.ref) {
295 const cidString = fileNode.blob.ref.toString();
296 blobMap.set(fullPath, {
297 blobRef: fileNode.blob,
298 cid: cidString
299 });
300 }
301 } else if ('type' in entry.node && entry.node.type === 'directory') {
302 const subMap = extractBlobMap(entry.node as Directory, fullPath);
303 subMap.forEach((value, key) => blobMap.set(key, value));
304 }
305 // Skip subfs nodes - they don't contain blobs in the main tree
306 }
307
308 return blobMap;
309}
310
311/**
312 * Extract all subfs URIs from a directory tree with their mount paths
313 */
314export function extractSubfsUris(
315 directory: Directory,
316 currentPath: string = ''
317): Array<{ uri: string; path: string }> {
318 const uris: Array<{ uri: string; path: string }> = [];
319
320 for (const entry of directory.entries) {
321 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
322
323 if ('type' in entry.node) {
324 if (entry.node.type === 'subfs') {
325 // Subfs node with subject URI
326 const subfsNode = entry.node as any;
327 if (subfsNode.subject) {
328 uris.push({ uri: subfsNode.subject, path: fullPath });
329 }
330 } else if (entry.node.type === 'directory') {
331 // Recursively search subdirectories
332 const subUris = extractSubfsUris(entry.node as Directory, fullPath);
333 uris.push(...subUris);
334 }
335 }
336 }
337
338 return uris;
339}
340
341/**
342 * Estimate the JSON size of a directory tree
343 */
344export function estimateDirectorySize(directory: Directory): number {
345 return JSON.stringify(directory).length;
346}
347
348/**
349 * Count files in a directory tree
350 */
351export function countFilesInDirectory(directory: Directory): number {
352 let count = 0;
353 for (const entry of directory.entries) {
354 if ('type' in entry.node && entry.node.type === 'file') {
355 count++;
356 } else if ('type' in entry.node && entry.node.type === 'directory') {
357 count += countFilesInDirectory(entry.node as Directory);
358 }
359 }
360 return count;
361}
362
363/**
364 * Find all directories in a tree with their paths and sizes
365 */
366export function findLargeDirectories(directory: Directory, currentPath: string = ''): Array<{
367 path: string;
368 directory: Directory;
369 size: number;
370 fileCount: number;
371}> {
372 const result: Array<{ path: string; directory: Directory; size: number; fileCount: number }> = [];
373
374 for (const entry of directory.entries) {
375 if ('type' in entry.node && entry.node.type === 'directory') {
376 const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
377 const dir = entry.node as Directory;
378 const size = estimateDirectorySize(dir);
379 const fileCount = countFilesInDirectory(dir);
380
381 result.push({ path: dirPath, directory: dir, size, fileCount });
382
383 // Recursively find subdirectories
384 const subdirs = findLargeDirectories(dir, dirPath);
385 result.push(...subdirs);
386 }
387 }
388
389 return result;
390}
391
392/**
393 * Replace a directory with a subfs node in the tree
394 */
395export function replaceDirectoryWithSubfs(
396 directory: Directory,
397 targetPath: string,
398 subfsUri: string
399): Directory {
400 const pathParts = targetPath.split('/');
401 const targetName = pathParts[pathParts.length - 1];
402 const parentPath = pathParts.slice(0, -1).join('/');
403
404 // If this is a root-level directory
405 if (pathParts.length === 1) {
406 const newEntries = directory.entries.map(entry => {
407 if (entry.name === targetName && 'type' in entry.node && entry.node.type === 'directory') {
408 return {
409 name: entry.name,
410 node: {
411 $type: 'place.wisp.fs#subfs' as const,
412 type: 'subfs' as const,
413 subject: subfsUri
414 }
415 };
416 }
417 return entry;
418 });
419
420 return {
421 $type: 'place.wisp.fs#directory' as const,
422 type: 'directory' as const,
423 entries: newEntries
424 };
425 }
426
427 // Recursively navigate to parent directory
428 const newEntries = directory.entries.map(entry => {
429 if ('type' in entry.node && entry.node.type === 'directory') {
430 const entryPath = entry.name;
431 if (parentPath.startsWith(entryPath) || parentPath === entry.name) {
432 const remainingPath = pathParts.slice(1).join('/');
433 return {
434 name: entry.name,
435 node: replaceDirectoryWithSubfs(entry.node as Directory, remainingPath, subfsUri)
436 };
437 }
438 }
439 return entry;
440 });
441
442 return {
443 $type: 'place.wisp.fs#directory' as const,
444 type: 'directory' as const,
445 entries: newEntries
446 };
447}