// Part of the wisp.place monorepo — a static site hosting service built on top of the AT Protocol.
// https://wisp.place
1import type { BlobRef } from "@atproto/api";
2import type { Record, Directory, File, Entry } from "../lexicons/types/place/wisp/fs";
3import { validateRecord } from "../lexicons/types/place/wisp/fs";
4import { gzipSync } from 'zlib';
5import { CID } from 'multiformats/cid';
6import { sha256 } from 'multiformats/hashes/sha2';
7import * as raw from 'multiformats/codecs/raw';
8import { createHash } from 'crypto';
9import * as mf from 'multiformats';
10
/**
 * A file received from the client, prior to its blob being uploaded.
 */
export interface UploadedFile {
  /** Slash-separated path; a leading base folder, if present, is stripped during processing. */
  name: string;
  /** Raw file bytes. NOTE(review): presumably already gzipped when `compressed` is true — confirm with caller. */
  content: Buffer;
  /** MIME type describing `content` as provided. */
  mimeType: string;
  /** Size in bytes. NOTE(review): unclear whether this is the original or compressed size — confirm with caller. */
  size: number;
  /** Presumably set when `content` has been gzip-compressed — verify against the upload path. */
  compressed?: boolean;
  /** Presumably the MIME type before compression — verify against the upload path. */
  originalMimeType?: string;
}
19
/**
 * Outcome of uploading one file's blob.
 * Consumed positionally by `updateFileBlobs` (parallel to its `filePaths` array).
 */
export interface FileUploadResult {
  /** Content hash of the uploaded bytes. NOTE(review): likely the CID from `computeCID` — confirm. */
  hash: string;
  /** Blob reference returned by the PDS upload; attached to the matching file node. */
  blobRef: BlobRef;
  /** Recorded on the file node when the stored blob is gzip-compressed. */
  encoding?: 'gzip';
  /** MIME type recorded on the file node when present. */
  mimeType?: string;
  /** Recorded on the file node when the blob content is base64-encoded. */
  base64?: boolean;
}
27
/**
 * Result of `processUploadedFiles`: the root directory tree plus the
 * total number of file nodes it contains.
 */
export interface ProcessedDirectory {
  /** Root of the constructed `place.wisp.fs` directory tree. */
  directory: Directory;
  /** Count of file nodes created across the whole tree. */
  fileCount: number;
}
32
33/**
34 * Determine if a file should be gzip compressed based on its MIME type
35 */
36export function shouldCompressFile(mimeType: string): boolean {
37 // Compress text-based files
38 const compressibleTypes = [
39 'text/html',
40 'text/css',
41 'text/javascript',
42 'application/javascript',
43 'application/json',
44 'image/svg+xml',
45 'text/xml',
46 'application/xml',
47 'text/plain',
48 'application/x-javascript'
49 ];
50
51 // Check if mime type starts with any compressible type
52 return compressibleTypes.some(type => mimeType.startsWith(type));
53}
54
55/**
56 * Compress a file using gzip with deterministic output
57 * Sets mtime to 0 to ensure identical content produces identical compressed output
58 */
59export function compressFile(content: Buffer): Buffer {
60 return gzipSync(content, {
61 level: 9,
62 mtime: 0 // Fixed timestamp for deterministic compression
63 });
64}
65
/**
 * Build a nested `place.wisp.fs` directory tree from a flat list of
 * uploaded files.
 *
 * Each file's leading base folder (e.g. the name of a dragged-in folder)
 * is stripped before placement. File nodes are created with
 * `blob: undefined`; `updateFileBlobs` fills in real refs after upload.
 *
 * @param files - Flat list of uploaded files with slash-separated paths.
 * @returns Root directory plus the total number of file nodes created.
 */
export function processUploadedFiles(files: UploadedFile[]): ProcessedDirectory {
  const entries: Entry[] = [];
  let fileCount = 0;

  // Group files by directory path so each directory is materialized once.
  const directoryMap = new Map<string, UploadedFile[]>();

  for (const file of files) {
    // Skip undefined/null files (defensive)
    if (!file || !file.name) {
      console.error('Skipping undefined or invalid file in processUploadedFiles');
      continue;
    }

    // Remove any base folder name from the path
    const normalizedPath = file.name.replace(/^[^\/]*\//, '');
    const parts = normalizedPath.split('/');

    if (parts.length === 1) {
      // Root-level file: emit its entry immediately.
      entries.push({
        name: parts[0],
        node: {
          $type: 'place.wisp.fs#file' as const,
          type: 'file' as const,
          blob: undefined as any // Will be filled after upload
        }
      });
      fileCount++;
    } else {
      // File in subdirectory: defer until all files are grouped by dir path.
      const dirPath = parts.slice(0, -1).join('/');
      if (!directoryMap.has(dirPath)) {
        directoryMap.set(dirPath, []);
      }
      // Store under the normalized (base-folder-stripped) path.
      directoryMap.get(dirPath)!.push({
        ...file,
        name: normalizedPath
      });
    }
  }

  // Process subdirectories
  for (const [dirPath, dirFiles] of directoryMap) {
    const dirEntries: Entry[] = [];

    for (const file of dirFiles) {
      // Leaf name only; the directory walk below places it correctly.
      const fileName = file.name.split('/').pop()!;
      dirEntries.push({
        name: fileName,
        node: {
          $type: 'place.wisp.fs#file' as const,
          type: 'file' as const,
          blob: undefined as any // Will be filled after upload
        }
      });
      fileCount++;
    }

    // Build nested directory structure, creating intermediate directories
    // on demand. This is order-independent: "a" may first be created as an
    // empty intermediate by "a/b" and receive its own files on a later pass.
    const pathParts = dirPath.split('/');
    let currentEntries = entries;

    for (let i = 0; i < pathParts.length; i++) {
      const part = pathParts[i];
      const isLast = i === pathParts.length - 1;

      let existingEntry = currentEntries.find(e => e.name === part);

      if (!existingEntry) {
        // New directory node: the leaf level gets this group's files,
        // intermediates start empty.
        const newDir = {
          $type: 'place.wisp.fs#directory' as const,
          type: 'directory' as const,
          entries: isLast ? dirEntries : []
        };

        existingEntry = {
          name: part,
          node: newDir
        };
        currentEntries.push(existingEntry);
      } else if ('entries' in existingEntry.node && isLast) {
        // Directory already exists (created earlier as an intermediate);
        // append this group's file entries to it.
        (existingEntry.node as any).entries.push(...dirEntries);
      }

      // Descend into this directory for the next path segment.
      if (existingEntry && 'entries' in existingEntry.node) {
        currentEntries = (existingEntry.node as any).entries;
      }
    }
  }

  const result = {
    directory: {
      $type: 'place.wisp.fs#directory' as const,
      type: 'directory' as const,
      entries
    },
    fileCount
  };

  return result;
}
171
172/**
173 * Create the manifest record for a site
174 */
175export function createManifest(
176 siteName: string,
177 root: Directory,
178 fileCount: number
179): Record {
180 const manifest = {
181 $type: 'place.wisp.fs' as const,
182 site: siteName,
183 root,
184 fileCount,
185 createdAt: new Date().toISOString()
186 };
187
188 // Validate the manifest before returning
189 const validationResult = validateRecord(manifest);
190 if (!validationResult.success) {
191 throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
192 }
193
194 return manifest;
195}
196
197/**
198 * Update file blobs in directory structure after upload
199 * Uses path-based matching to correctly match files in nested directories
200 */
201export function updateFileBlobs(
202 directory: Directory,
203 uploadResults: FileUploadResult[],
204 filePaths: string[],
205 currentPath: string = ''
206): Directory {
207 const updatedEntries = directory.entries.map(entry => {
208 if ('type' in entry.node && entry.node.type === 'file') {
209 // Build the full path for this file
210 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
211
212 // Find exact match in filePaths (need to handle normalized paths)
213 const fileIndex = filePaths.findIndex((path) => {
214 // Normalize both paths by removing leading base folder
215 const normalizedUploadPath = path.replace(/^[^\/]*\//, '');
216 const normalizedEntryPath = fullPath;
217 return normalizedUploadPath === normalizedEntryPath || path === fullPath;
218 });
219
220 if (fileIndex !== -1 && uploadResults[fileIndex]) {
221 const result = uploadResults[fileIndex];
222 const blobRef = result.blobRef;
223
224 return {
225 ...entry,
226 node: {
227 $type: 'place.wisp.fs#file' as const,
228 type: 'file' as const,
229 blob: blobRef,
230 ...(result.encoding && { encoding: result.encoding }),
231 ...(result.mimeType && { mimeType: result.mimeType }),
232 ...(result.base64 && { base64: result.base64 })
233 }
234 };
235 } else {
236 console.error(`❌ BLOB MATCHING ERROR: Could not find blob for file: ${fullPath}`);
237 console.error(` Available paths:`, filePaths.slice(0, 10), filePaths.length > 10 ? `... and ${filePaths.length - 10} more` : '');
238 }
239 } else if ('type' in entry.node && entry.node.type === 'directory') {
240 const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
241 return {
242 ...entry,
243 node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath)
244 };
245 }
246 return entry;
247 }) as Entry[];
248
249 const result = {
250 $type: 'place.wisp.fs#directory' as const,
251 type: 'directory' as const,
252 entries: updatedEntries
253 };
254
255 return result;
256}
257
258/**
259 * Compute CID (Content Identifier) for blob content
260 * Uses the same algorithm as AT Protocol: CIDv1 with raw codec and SHA-256
261 * Based on @atproto/common/src/ipld.ts sha256RawToCid implementation
262 */
263export function computeCID(content: Buffer): string {
264 // Use node crypto to compute sha256 hash (same as AT Protocol)
265 const hash = createHash('sha256').update(content).digest();
266 // Create digest object from hash bytes
267 const digest = mf.digest.create(sha256.code, hash);
268 // Create CIDv1 with raw codec
269 const cid = CID.createV1(raw.code, digest);
270 return cid.toString();
271}
272
273/**
274 * Extract blob information from a directory tree
275 * Returns a map of file paths to their blob refs and CIDs
276 */
277export function extractBlobMap(
278 directory: Directory,
279 currentPath: string = ''
280): Map<string, { blobRef: BlobRef; cid: string }> {
281 const blobMap = new Map<string, { blobRef: BlobRef; cid: string }>();
282
283 for (const entry of directory.entries) {
284 const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
285
286 if ('type' in entry.node && entry.node.type === 'file') {
287 const fileNode = entry.node as File;
288 // AT Protocol SDK returns BlobRef class instances, not plain objects
289 // The ref is a CID instance that can be converted to string
290 if (fileNode.blob && fileNode.blob.ref) {
291 const cidString = fileNode.blob.ref.toString();
292 blobMap.set(fullPath, {
293 blobRef: fileNode.blob,
294 cid: cidString
295 });
296 }
297 } else if ('type' in entry.node && entry.node.type === 'directory') {
298 const subMap = extractBlobMap(entry.node as Directory, fullPath);
299 subMap.forEach((value, key) => blobMap.set(key, value));
300 }
301 }
302
303 return blobMap;
304}