Monorepo for wisp.place, a static site hosting service built on top of the AT Protocol.
import { AtpAgent } from '@atproto/api';
import type { Record as WispFsRecord, Directory, Entry, File } from '../lexicon/types/place/wisp/fs';
import type { Record as SubfsRecord } from '../lexicon/types/place/wisp/subfs';
import type { Record as WispSettings } from '../lexicon/types/place/wisp/settings';
import { existsSync, mkdirSync, rmSync } from 'fs';
import { writeFile, readFile, rename } from 'fs/promises';
import { safeFetchJson, safeFetchBlob } from './safe-fetch';
import { CID } from 'multiformats';

const CACHE_DIR = process.env.CACHE_DIR || './cache/sites';
const CACHE_TTL = 14 * 24 * 60 * 60 * 1000; // 14 days cache TTL

interface CacheMetadata {
  recordCid: string;
  cachedAt: number;
  did: string;
  rkey: string;
  // Map of file path to blob CID for incremental updates
  fileCids?: Record<string, string>;
  // Site settings
  settings?: WispSettings;
}
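
// Illustrative sketch (not from the source): a cached site's .metadata.json written by
// saveCacheMetadata below might look like the following, with hypothetical DID/CID values.
//
// {
//   "recordCid": "bafyreiexamplerecordcid",
//   "cachedAt": 1736899200000,
//   "did": "did:plc:exampledid",
//   "rkey": "my-site",
//   "fileCids": {
//     "index.html": "bafkreiexampleblobcid1",
//     "css/style.css": "bafkreiexampleblobcid2"
//   }
// }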

/**
 * Determines if a MIME type should benefit from gzip compression.
 * Returns true for text-based web assets (HTML, CSS, JS, JSON, XML, SVG)
 * and for uncompressed audio formats (WAV, AIFF).
 * Returns false for already-compressed formats (images, video, other audio, PDFs)
 * and for unknown types.
 */
export function shouldCompressMimeType(mimeType: string | undefined): boolean {
  if (!mimeType) return false;

  const mime = mimeType.toLowerCase();

  // Text-based web assets and uncompressed audio that benefit from compression
  const compressibleTypes = [
    'text/html',
    'text/css',
    'text/javascript',
    'application/javascript',
    'application/x-javascript',
    'text/xml',
    'application/xml',
    'application/json',
    'text/plain',
    'image/svg+xml',
    // Uncompressed audio formats
    'audio/wav',
    'audio/wave',
    'audio/x-wav',
    'audio/aiff',
    'audio/x-aiff',
  ];

  if (compressibleTypes.some(type => mime === type || mime.startsWith(type))) {
    return true;
  }

  // Already-compressed formats that should NOT be double-compressed
  const alreadyCompressedPrefixes = [
    'video/',
    'audio/',
    'image/',
    'application/pdf',
    'application/zip',
    'application/gzip',
  ];

  if (alreadyCompressedPrefixes.some(prefix => mime.startsWith(prefix))) {
    return false;
  }

  // Default to not compressing for unknown types
  return false;
}
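
// Illustrative calls (assumed values, not from the source):
//   shouldCompressMimeType('text/html; charset=utf-8') // true  (text asset)
//   shouldCompressMimeType('audio/wav')                // true  (uncompressed audio)
//   shouldCompressMimeType('image/png')                // false (already compressed)
//   shouldCompressMimeType(undefined)                  // false (unknown)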

interface IpldLink {
  $link: string;
}

interface TypedBlobRef {
  ref: CID | IpldLink;
}

interface UntypedBlobRef {
  cid: string;
}

function isIpldLink(obj: unknown): obj is IpldLink {
  return typeof obj === 'object' && obj !== null && '$link' in obj && typeof (obj as IpldLink).$link === 'string';
}

function isTypedBlobRef(obj: unknown): obj is TypedBlobRef {
  return typeof obj === 'object' && obj !== null && 'ref' in obj;
}

function isUntypedBlobRef(obj: unknown): obj is UntypedBlobRef {
  return typeof obj === 'object' && obj !== null && 'cid' in obj && typeof (obj as UntypedBlobRef).cid === 'string';
}

export async function resolveDid(identifier: string): Promise<string | null> {
  try {
    // If it's already a DID, return it
    if (identifier.startsWith('did:')) {
      return identifier;
    }

    // Otherwise, resolve the handle using the agent's built-in method
    const agent = new AtpAgent({ service: 'https://public.api.bsky.app' });
    const response = await agent.resolveHandle({ handle: identifier });
    return response.data.did;
  } catch (err) {
    console.error('Failed to resolve identifier', identifier, err);
    return null;
  }
}

export async function getPdsForDid(did: string): Promise<string | null> {
  try {
    let doc;

    if (did.startsWith('did:plc:')) {
      doc = await safeFetchJson(`https://plc.directory/${encodeURIComponent(did)}`);
    } else if (did.startsWith('did:web:')) {
      const didUrl = didWebToHttps(did);
      doc = await safeFetchJson(didUrl);
    } else {
      console.error('Unsupported DID method', did);
      return null;
    }

    const services = doc.service || [];
    const pdsService = services.find((s: any) => s.id === '#atproto_pds');

    return pdsService?.serviceEndpoint || null;
  } catch (err) {
    console.error('Failed to get PDS for DID', did, err);
    return null;
  }
}

function didWebToHttps(did: string): string {
  const didParts = did.split(':');
  if (didParts.length < 3 || didParts[0] !== 'did' || didParts[1] !== 'web') {
    throw new Error('Invalid did:web format');
  }

  const domain = didParts[2];
  const pathParts = didParts.slice(3);

  if (pathParts.length === 0) {
    return `https://${domain}/.well-known/did.json`;
  } else {
    const path = pathParts.join('/');
    return `https://${domain}/${path}/did.json`;
  }
}
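
// Illustrative mapping performed by didWebToHttps (hypothetical identifiers):
//   'did:web:example.com'             -> 'https://example.com/.well-known/did.json'
//   'did:web:example.com:users:alice' -> 'https://example.com/users/alice/did.json'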

export async function fetchSiteRecord(did: string, rkey: string): Promise<{ record: WispFsRecord; cid: string } | null> {
  try {
    const pdsEndpoint = await getPdsForDid(did);
    if (!pdsEndpoint) return null;

    const url = `${pdsEndpoint}/xrpc/com.atproto.repo.getRecord?repo=${encodeURIComponent(did)}&collection=place.wisp.fs&rkey=${encodeURIComponent(rkey)}`;
    const data = await safeFetchJson(url);

    return {
      record: data.value as WispFsRecord,
      cid: data.cid || ''
    };
  } catch (err) {
    console.error('Failed to fetch site record', did, rkey, err);
    return null;
  }
}

export async function fetchSiteSettings(did: string, rkey: string): Promise<WispSettings | null> {
  try {
    const pdsEndpoint = await getPdsForDid(did);
    if (!pdsEndpoint) return null;

    const url = `${pdsEndpoint}/xrpc/com.atproto.repo.getRecord?repo=${encodeURIComponent(did)}&collection=place.wisp.settings&rkey=${encodeURIComponent(rkey)}`;
    const data = await safeFetchJson(url);

    return data.value as WispSettings;
  } catch (err) {
    // Settings are optional, so return null if not found
    return null;
  }
}

export function extractBlobCid(blobRef: unknown): string | null {
  if (isIpldLink(blobRef)) {
    return blobRef.$link;
  }

  if (isTypedBlobRef(blobRef)) {
    const ref = blobRef.ref;

    const cid = CID.asCID(ref);
    if (cid) {
      return cid.toString();
    }

    if (isIpldLink(ref)) {
      return ref.$link;
    }
  }

  if (isUntypedBlobRef(blobRef)) {
    return blobRef.cid;
  }

  return null;
}
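
// Blob-ref shapes accepted by extractBlobCid (hypothetical CID strings):
//   { $link: 'bafkreiexample' }              -> 'bafkreiexample'  (bare IPLD link)
//   { ref: CID.parse('bafkreiexample...') }  -> the CID as string (typed ref carrying a CID instance)
//   { ref: { $link: 'bafkreiexample' } }     -> 'bafkreiexample'  (typed ref serialized as JSON)
//   { cid: 'bafkreiexample' }                -> 'bafkreiexample'  (legacy untyped ref)
//   anything else                            -> null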

/**
 * Extract all subfs URIs from a directory tree with their mount paths
 */
function extractSubfsUris(directory: Directory, currentPath: string = ''): Array<{ uri: string; path: string }> {
  const uris: Array<{ uri: string; path: string }> = [];

  for (const entry of directory.entries) {
    const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;

    if ('type' in entry.node) {
      if (entry.node.type === 'subfs') {
        // Subfs node with subject URI
        const subfsNode = entry.node as any;
        if (subfsNode.subject) {
          uris.push({ uri: subfsNode.subject, path: fullPath });
        }
      } else if (entry.node.type === 'directory') {
        // Recursively search subdirectories
        const subUris = extractSubfsUris(entry.node as Directory, fullPath);
        uris.push(...subUris);
      }
    }
  }

  return uris;
}
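
// Minimal sketch of the tree shape this walks, assuming the field names used above
// (hypothetical values, not the authoritative lexicon definition):
//
// const exampleRoot = {
//   entries: [
//     { name: 'index.html', node: { type: 'file', blob: { $link: 'bafkreiexample' } } },
//     { name: 'blog', node: { type: 'subfs', subject: 'at://did:plc:exampledid/place.wisp.subfs/blog' } },
//   ],
// } as unknown as Directory;
//
// extractSubfsUris(exampleRoot)
//   -> [{ uri: 'at://did:plc:exampledid/place.wisp.subfs/blog', path: 'blog' }]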

/**
 * Fetch a subfs record from the PDS
 */
async function fetchSubfsRecord(uri: string, pdsEndpoint: string): Promise<SubfsRecord | null> {
  try {
    // Parse URI: at://did/collection/rkey
    const parts = uri.replace('at://', '').split('/');
    if (parts.length < 3) {
      console.error('Invalid subfs URI:', uri);
      return null;
    }

    const did = parts[0];
    const collection = parts[1];
    const rkey = parts[2];

    // Fetch the record from PDS
    const url = `${pdsEndpoint}/xrpc/com.atproto.repo.getRecord?repo=${encodeURIComponent(did)}&collection=${encodeURIComponent(collection)}&rkey=${encodeURIComponent(rkey)}`;
    const response = await safeFetchJson(url);

    if (!response || !response.value) {
      console.error('Subfs record not found:', uri);
      return null;
    }

    return response.value as SubfsRecord;
  } catch (err) {
    console.error('Failed to fetch subfs record:', uri, err);
    return null;
  }
}

/**
 * Replace subfs nodes in a directory tree with their actual content.
 * Subfs entries are "merged" - their root entries are hoisted into the parent directory.
 */
async function expandSubfsNodes(directory: Directory, pdsEndpoint: string): Promise<Directory> {
  // Extract all subfs URIs
  const subfsUris = extractSubfsUris(directory);

  if (subfsUris.length === 0) {
    // No subfs nodes, return as-is
    return directory;
  }

  console.log(`Found ${subfsUris.length} subfs records, fetching...`);

  // Fetch all subfs records in parallel
  const subfsRecords = await Promise.all(
    subfsUris.map(async ({ uri, path }) => {
      const record = await fetchSubfsRecord(uri, pdsEndpoint);
      return { record, path };
    })
  );

  // Build a map of path -> root entries to merge
  const subfsMap = new Map<string, Entry[]>();
  for (const { record, path } of subfsRecords) {
    if (record && record.root && record.root.entries) {
      subfsMap.set(path, record.root.entries);
    }
  }

  // Replace subfs nodes by merging their root entries into the parent directory
  function replaceSubfsInEntries(entries: Entry[], currentPath: string = ''): Entry[] {
    const result: Entry[] = [];

    for (const entry of entries) {
      const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
      const node = entry.node;

      if ('type' in node && node.type === 'subfs') {
        // Check if this is a flat merge or subdirectory merge (default to flat if not specified)
        const subfsNode = node as any;
        const isFlat = subfsNode.flat !== false; // Default to true
        const subfsEntries = subfsMap.get(fullPath);

        if (subfsEntries) {
          console.log(`Merging subfs node at ${fullPath} (${subfsEntries.length} entries, flat: ${isFlat})`);

          if (isFlat) {
            // Flat merge: hoist entries directly into parent directory
            const processedEntries = replaceSubfsInEntries(subfsEntries, currentPath);
            result.push(...processedEntries);
          } else {
            // Subdirectory merge: create a directory with the subfs node's name
            const processedEntries = replaceSubfsInEntries(subfsEntries, fullPath);
            result.push({
              name: entry.name,
              node: {
                type: 'directory',
                entries: processedEntries
              }
            });
          }
        } else {
          // If fetch failed, skip this entry
          console.warn(`Failed to fetch subfs at ${fullPath}, skipping`);
        }
      } else if ('type' in node && node.type === 'directory' && 'entries' in node) {
        // Recursively process subdirectories
        result.push({
          ...entry,
          node: {
            ...node,
            entries: replaceSubfsInEntries(node.entries, fullPath)
          }
        });
      } else {
        // Regular file entry
        result.push(entry);
      }
    }

    return result;
  }

  return {
    ...directory,
    entries: replaceSubfsInEntries(directory.entries)
  };
}

export async function downloadAndCacheSite(did: string, rkey: string, record: WispFsRecord, pdsEndpoint: string, recordCid: string): Promise<void> {
  console.log('Caching site', did, rkey);

  if (!record.root) {
    console.error('Record missing root directory:', JSON.stringify(record, null, 2));
    throw new Error('Invalid record structure: missing root directory');
  }

  if (!record.root.entries || !Array.isArray(record.root.entries)) {
    console.error('Record root missing entries array:', JSON.stringify(record.root, null, 2));
    throw new Error('Invalid record structure: root missing entries array');
  }

  // Expand subfs nodes before caching
  const expandedRoot = await expandSubfsNodes(record.root, pdsEndpoint);

  // Get existing cache metadata to check for incremental updates
  const existingMetadata = await getCacheMetadata(did, rkey);
  const existingFileCids = existingMetadata?.fileCids || {};

  // Use a temporary directory with timestamp to avoid collisions
  const tempSuffix = `.tmp-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;
  const tempDir = `${CACHE_DIR}/${did}/${rkey}${tempSuffix}`;
  const finalDir = `${CACHE_DIR}/${did}/${rkey}`;

  try {
    // Collect file CIDs from the new record (using expanded root)
    const newFileCids: Record<string, string> = {};
    collectFileCidsFromEntries(expandedRoot.entries, '', newFileCids);

    // Fetch site settings (optional)
    const settings = await fetchSiteSettings(did, rkey);

    // Download/copy files to temporary directory (with incremental logic, using expanded root)
    await cacheFiles(did, rkey, expandedRoot.entries, pdsEndpoint, '', tempSuffix, existingFileCids, finalDir);
    await saveCacheMetadata(did, rkey, recordCid, tempSuffix, newFileCids, settings);

    // Atomically replace old cache with new cache
    // On POSIX systems (Linux/macOS), rename is atomic
    if (existsSync(finalDir)) {
      // Rename old directory to backup
      const backupDir = `${finalDir}.old-${Date.now()}`;
      await rename(finalDir, backupDir);

      try {
        // Rename new directory to final location
        await rename(tempDir, finalDir);

        // Clean up old backup
        rmSync(backupDir, { recursive: true, force: true });
      } catch (err) {
        // If rename failed, restore backup
        if (existsSync(backupDir) && !existsSync(finalDir)) {
          await rename(backupDir, finalDir);
        }
        throw err;
      }
    } else {
      // No existing cache, just rename temp to final
      await rename(tempDir, finalDir);
    }

    console.log('Successfully cached site atomically', did, rkey);
  } catch (err) {
    // Clean up temp directory on failure
    if (existsSync(tempDir)) {
      rmSync(tempDir, { recursive: true, force: true });
    }
    throw err;
  }
}

/**
 * Recursively collect file CIDs from entries for incremental update tracking
 */
function collectFileCidsFromEntries(entries: Entry[], pathPrefix: string, fileCids: Record<string, string>): void {
  for (const entry of entries) {
    const currentPath = pathPrefix ? `${pathPrefix}/${entry.name}` : entry.name;
    const node = entry.node;

    if ('type' in node && node.type === 'directory' && 'entries' in node) {
      collectFileCidsFromEntries(node.entries, currentPath, fileCids);
    } else if ('type' in node && node.type === 'file' && 'blob' in node) {
      const fileNode = node as File;
      const cid = extractBlobCid(fileNode.blob);
      if (cid) {
        fileCids[currentPath] = cid;
      }
    }
  }
}

async function cacheFiles(
  did: string,
  site: string,
  entries: Entry[],
  pdsEndpoint: string,
  pathPrefix: string,
  dirSuffix: string = '',
  existingFileCids: Record<string, string> = {},
  existingCacheDir?: string
): Promise<void> {
  // Collect file tasks, separating unchanged files from new/changed files
  const downloadTasks: Array<() => Promise<void>> = [];
  const copyTasks: Array<() => Promise<void>> = [];

  function collectFileTasks(
    entries: Entry[],
    currentPathPrefix: string
  ) {
    for (const entry of entries) {
      const currentPath = currentPathPrefix ? `${currentPathPrefix}/${entry.name}` : entry.name;
      const node = entry.node;

      if ('type' in node && node.type === 'directory' && 'entries' in node) {
        collectFileTasks(node.entries, currentPath);
      } else if ('type' in node && node.type === 'file' && 'blob' in node) {
        const fileNode = node as File;
        const cid = extractBlobCid(fileNode.blob);

        // Check if the file is unchanged (same CID as the existing cache)
        if (cid && existingFileCids[currentPath] === cid && existingCacheDir) {
          // File unchanged - copy from existing cache instead of downloading
          copyTasks.push(() => copyExistingFile(
            did,
            site,
            currentPath,
            dirSuffix,
            existingCacheDir
          ));
        } else {
          // File new or changed - download it
          downloadTasks.push(() => cacheFileBlob(
            did,
            site,
            currentPath,
            fileNode.blob,
            pdsEndpoint,
            fileNode.encoding,
            fileNode.mimeType,
            fileNode.base64,
            dirSuffix
          ));
        }
      }
    }
  }

  collectFileTasks(entries, pathPrefix);

  console.log(`[Incremental Update] Files to copy: ${copyTasks.length}, Files to download: ${downloadTasks.length}`);

  // Copy unchanged files in parallel batches (fast local operations)
  const copyLimit = 50;
  for (let i = 0; i < copyTasks.length; i += copyLimit) {
    const batch = copyTasks.slice(i, i + copyLimit);
    await Promise.all(batch.map(task => task()));
    if (copyTasks.length > copyLimit) {
      console.log(`[Cache Progress] Copied ${Math.min(i + copyLimit, copyTasks.length)}/${copyTasks.length} unchanged files`);
    }
  }

  // Download new/changed files concurrently, 20 at a time
  const downloadLimit = 20;
  let successCount = 0;
  let failureCount = 0;

  for (let i = 0; i < downloadTasks.length; i += downloadLimit) {
    const batch = downloadTasks.slice(i, i + downloadLimit);
    const results = await Promise.allSettled(batch.map(task => task()));

    // Count successes and failures
    results.forEach((result) => {
      if (result.status === 'fulfilled') {
        successCount++;
      } else {
        failureCount++;
        console.error(`[Cache] Failed to download file (continuing with others):`, result.reason);
      }
    });

    if (downloadTasks.length > downloadLimit) {
      console.log(`[Cache Progress] Downloaded ${Math.min(i + downloadLimit, downloadTasks.length)}/${downloadTasks.length} files (${failureCount} failed)`);
    }
  }

  if (failureCount > 0) {
    console.warn(`[Cache] Completed with ${successCount} successful and ${failureCount} failed file downloads`);
  }
}

/**
 * Copy an unchanged file from the existing cache to the new cache location
 */
async function copyExistingFile(
  did: string,
  site: string,
  filePath: string,
  dirSuffix: string,
  existingCacheDir: string
): Promise<void> {
  const { copyFile } = await import('fs/promises');

  const sourceFile = `${existingCacheDir}/${filePath}`;
  const destFile = `${CACHE_DIR}/${did}/${site}${dirSuffix}/${filePath}`;
  const destDir = destFile.substring(0, destFile.lastIndexOf('/'));

  // Create destination directory if needed
  if (destDir && !existsSync(destDir)) {
    mkdirSync(destDir, { recursive: true });
  }

  try {
    // Copy the file
    await copyFile(sourceFile, destFile);

    // Copy metadata file if it exists
    const sourceMetaFile = `${sourceFile}.meta`;
    const destMetaFile = `${destFile}.meta`;
    if (existsSync(sourceMetaFile)) {
      await copyFile(sourceMetaFile, destMetaFile);
    }
  } catch (err) {
    console.error(`Failed to copy cached file ${filePath}:`, err);
    throw err;
  }
}

async function cacheFileBlob(
  did: string,
  site: string,
  filePath: string,
  blobRef: any,
  pdsEndpoint: string,
  encoding?: 'gzip',
  mimeType?: string,
  base64?: boolean,
  dirSuffix: string = ''
): Promise<void> {
  const cid = extractBlobCid(blobRef);
  if (!cid) {
    console.error('Could not extract CID from blob', blobRef);
    return;
  }

  const blobUrl = `${pdsEndpoint}/xrpc/com.atproto.sync.getBlob?did=${encodeURIComponent(did)}&cid=${encodeURIComponent(cid)}`;

  console.log(`[Cache] Fetching blob for file: ${filePath}, CID: ${cid}`);

  // Allow up to 500MB per file blob, with a 5 minute timeout
  let content = await safeFetchBlob(blobUrl, { maxSize: 500 * 1024 * 1024, timeout: 300000 });

  // If the content is base64-encoded, decode it back to raw binary (gzipped or not).
  // The blob stores base64 text as raw bytes: decode the bytes to a string, then the string to binary.
  if (base64) {
    const textDecoder = new TextDecoder();
    const base64String = textDecoder.decode(content);
    content = Buffer.from(base64String, 'base64');
  }

  const cacheFile = `${CACHE_DIR}/${did}/${site}${dirSuffix}/${filePath}`;
  const fileDir = cacheFile.substring(0, cacheFile.lastIndexOf('/'));

  if (fileDir && !existsSync(fileDir)) {
    mkdirSync(fileDir, { recursive: true });
  }

  // Use the shared function to determine if this should remain compressed
  const shouldStayCompressed = shouldCompressMimeType(mimeType);

  // Decompress files that shouldn't be stored compressed (check the gzip magic bytes first)
  if (encoding === 'gzip' && !shouldStayCompressed && content.length >= 2 &&
      content[0] === 0x1f && content[1] === 0x8b) {
    try {
      const { gunzipSync } = await import('zlib');
      const decompressed = gunzipSync(content);
      content = decompressed;
      // Clear the encoding flag since we're storing decompressed content
      encoding = undefined;
    } catch (error) {
      console.error(`Failed to decompress ${filePath}, storing original gzipped content:`, error);
    }
  }

  await writeFile(cacheFile, content);

  // Store metadata only if the file is still compressed
  if (encoding === 'gzip' && mimeType) {
    const metaFile = `${cacheFile}.meta`;
    await writeFile(metaFile, JSON.stringify({ encoding, mimeType }));
    console.log('Cached file', filePath, content.length, 'bytes (gzipped,', mimeType + ')');
  } else {
    console.log('Cached file', filePath, content.length, 'bytes');
  }
}

/**
 * Sanitize a file path to prevent directory traversal attacks.
 * Removes any path segments that attempt to go up directories.
 */
export function sanitizePath(filePath: string): string {
  // Remove leading slashes
  const cleaned = filePath.replace(/^\/+/, '');

  // Split into segments and filter out dangerous ones
  const segments = cleaned.split('/').filter(segment => {
    // Remove empty segments
    if (!segment || segment === '.') return false;
    // Remove parent directory references
    if (segment === '..') return false;
    // Remove segments with null bytes
    if (segment.includes('\0')) return false;
    return true;
  });

  // Rejoin the safe segments
  return segments.join('/');
}
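
// Illustrative behavior:
//   sanitizePath('/assets/app.js')   -> 'assets/app.js'
//   sanitizePath('../../etc/passwd') -> 'etc/passwd'
//   sanitizePath('a/./b//c')         -> 'a/b/c'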

export function getCachedFilePath(did: string, site: string, filePath: string): string {
  const sanitizedPath = sanitizePath(filePath);
  return `${CACHE_DIR}/${did}/${site}/${sanitizedPath}`;
}

export function isCached(did: string, site: string): boolean {
  return existsSync(`${CACHE_DIR}/${did}/${site}`);
}

async function saveCacheMetadata(did: string, rkey: string, recordCid: string, dirSuffix: string = '', fileCids?: Record<string, string>, settings?: WispSettings | null): Promise<void> {
  const metadata: CacheMetadata = {
    recordCid,
    cachedAt: Date.now(),
    did,
    rkey,
    fileCids,
    settings: settings || undefined
  };

  const metadataPath = `${CACHE_DIR}/${did}/${rkey}${dirSuffix}/.metadata.json`;
  const metadataDir = metadataPath.substring(0, metadataPath.lastIndexOf('/'));

  if (!existsSync(metadataDir)) {
    mkdirSync(metadataDir, { recursive: true });
  }

  await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
}

async function getCacheMetadata(did: string, rkey: string): Promise<CacheMetadata | null> {
  try {
    const metadataPath = `${CACHE_DIR}/${did}/${rkey}/.metadata.json`;
    if (!existsSync(metadataPath)) return null;

    const content = await readFile(metadataPath, 'utf-8');
    return JSON.parse(content) as CacheMetadata;
  } catch (err) {
    console.error('Failed to read cache metadata', err);
    return null;
  }
}

export async function getCachedSettings(did: string, rkey: string): Promise<WispSettings | null> {
  const metadata = await getCacheMetadata(did, rkey);

  // If metadata has settings, return them
  if (metadata?.settings) {
    return metadata.settings;
  }

  // If metadata exists but has no settings, try to fetch from PDS and update cache
  if (metadata) {
    console.log('[Cache] Metadata missing settings, fetching from PDS', { did, rkey });
    try {
      const settings = await fetchSiteSettings(did, rkey);
      if (settings) {
        // Update the cached metadata with the fetched settings
        await updateCacheMetadataSettings(did, rkey, settings);
        console.log('[Cache] Updated metadata with fetched settings', { did, rkey });
        return settings;
      }
    } catch (err) {
      console.error('[Cache] Failed to fetch/update settings', { did, rkey, err });
    }
  }

  return null;
}

export async function updateCacheMetadataSettings(did: string, rkey: string, settings: WispSettings | null): Promise<void> {
  const metadataPath = `${CACHE_DIR}/${did}/${rkey}/.metadata.json`;

  if (!existsSync(metadataPath)) {
    console.warn('Metadata file does not exist, cannot update settings', { did, rkey });
    return;
  }

  try {
    // Read existing metadata
    const content = await readFile(metadataPath, 'utf-8');
    const metadata = JSON.parse(content) as CacheMetadata;

    // Update settings field
    metadata.settings = settings || undefined;

    // Write back to disk
    await writeFile(metadataPath, JSON.stringify(metadata, null, 2), 'utf-8');
    console.log('Updated metadata settings', { did, rkey, hasSettings: !!settings });
  } catch (err) {
    console.error('Failed to update metadata settings', err);
    throw err;
  }
}

export async function isCacheValid(did: string, rkey: string, currentRecordCid?: string): Promise<boolean> {
  const metadata = await getCacheMetadata(did, rkey);
  if (!metadata) return false;

  // Check if cache has expired (14 days TTL)
  const cacheAge = Date.now() - metadata.cachedAt;
  if (cacheAge > CACHE_TTL) {
    console.log('[Cache] Cache expired for', did, rkey);
    return false;
  }

  // If current CID is provided, verify it matches
  if (currentRecordCid && metadata.recordCid !== currentRecordCid) {
    console.log('[Cache] CID mismatch for', did, rkey, 'cached:', metadata.recordCid, 'current:', currentRecordCid);
    return false;
  }

  return true;
}
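
// Minimal usage sketch (hypothetical caller, not part of this module): resolve the
// identity, validate the cache against the live record CID, and re-cache only when needed.
//
// async function ensureSiteCached(identifier: string, rkey: string): Promise<boolean> {
//   const did = await resolveDid(identifier);
//   if (!did) return false;
//
//   const fetched = await fetchSiteRecord(did, rkey);
//   if (!fetched) return isCached(did, rkey);
//
//   if (await isCacheValid(did, rkey, fetched.cid)) return true;
//
//   const pds = await getPdsForDid(did);
//   if (!pds) return false;
//
//   await downloadAndCacheSite(did, rkey, fetched.record, pds, fetched.cid);
//   return true;
// }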