Monorepo for wisp.place, a static site hosting service built on top of the AT Protocol.
import { AtpAgent } from '@atproto/api';
import type { Record as WispFsRecord, Directory, Entry, File } from '../lexicon/types/place/wisp/fs';
import type { Record as SubfsRecord } from '../lexicon/types/place/wisp/subfs';
import { existsSync, mkdirSync, readFileSync, rmSync } from 'fs';
import { writeFile, readFile, rename } from 'fs/promises';
import { safeFetchJson, safeFetchBlob } from './safe-fetch';
import { CID } from 'multiformats';

const CACHE_DIR = process.env.CACHE_DIR || './cache/sites';
const CACHE_TTL = 14 * 24 * 60 * 60 * 1000; // 14 days cache TTL

interface CacheMetadata {
  recordCid: string;
  cachedAt: number;
  did: string;
  rkey: string;
  // Map of file path to blob CID for incremental updates
  fileCids?: Record<string, string>;
}

/**
 * Determines whether a MIME type benefits from gzip compression.
 * Returns true for text-based web assets (HTML, CSS, JS, JSON, XML, SVG)
 * and for uncompressed audio formats (WAV, AIFF).
 * Returns false for already-compressed formats (images, video, other audio, PDFs, archives).
 */
export function shouldCompressMimeType(mimeType: string | undefined): boolean {
  if (!mimeType) return false;

  const mime = mimeType.toLowerCase();

  // Text-based web assets and uncompressed audio that benefit from compression
  const compressibleTypes = [
    'text/html',
    'text/css',
    'text/javascript',
    'application/javascript',
    'application/x-javascript',
    'text/xml',
    'application/xml',
    'application/json',
    'text/plain',
    'image/svg+xml',
    // Uncompressed audio formats
    'audio/wav',
    'audio/wave',
    'audio/x-wav',
    'audio/aiff',
    'audio/x-aiff',
  ];

  if (compressibleTypes.some(type => mime === type || mime.startsWith(type))) {
    return true;
  }

  // Already-compressed formats that should NOT be double-compressed
  const alreadyCompressedPrefixes = [
    'video/',
    'audio/',
    'image/',
    'application/pdf',
    'application/zip',
    'application/gzip',
  ];

  if (alreadyCompressedPrefixes.some(prefix => mime.startsWith(prefix))) {
    return false;
  }

  // Default to not compressing for unknown types
  return false;
}
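
// Illustrative results (example calls, not exercised elsewhere in this module):
//   shouldCompressMimeType('text/html')        // true  - text asset
//   shouldCompressMimeType('image/svg+xml')    // true  - SVG is XML text
//   shouldCompressMimeType('audio/wav')        // true  - uncompressed audio
//   shouldCompressMimeType('image/png')        // false - already compressed
//   shouldCompressMimeType('application/wasm') // false - unknown types default to false
//   shouldCompressMimeType(undefined)          // false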

interface IpldLink {
  $link: string;
}

interface TypedBlobRef {
  ref: CID | IpldLink;
}

interface UntypedBlobRef {
  cid: string;
}

function isIpldLink(obj: unknown): obj is IpldLink {
  return typeof obj === 'object' && obj !== null && '$link' in obj && typeof (obj as IpldLink).$link === 'string';
}

function isTypedBlobRef(obj: unknown): obj is TypedBlobRef {
  return typeof obj === 'object' && obj !== null && 'ref' in obj;
}

function isUntypedBlobRef(obj: unknown): obj is UntypedBlobRef {
  return typeof obj === 'object' && obj !== null && 'cid' in obj && typeof (obj as UntypedBlobRef).cid === 'string';
}

export async function resolveDid(identifier: string): Promise<string | null> {
  try {
    // If it's already a DID, return it
    if (identifier.startsWith('did:')) {
      return identifier;
    }

    // Otherwise, resolve the handle using the agent's built-in method
    const agent = new AtpAgent({ service: 'https://public.api.bsky.app' });
    const response = await agent.resolveHandle({ handle: identifier });
    return response.data.did;
  } catch (err) {
    console.error('Failed to resolve identifier', identifier, err);
    return null;
  }
}

export async function getPdsForDid(did: string): Promise<string | null> {
  try {
    let doc;

    if (did.startsWith('did:plc:')) {
      doc = await safeFetchJson(`https://plc.directory/${encodeURIComponent(did)}`);
    } else if (did.startsWith('did:web:')) {
      const didUrl = didWebToHttps(did);
      doc = await safeFetchJson(didUrl);
    } else {
      console.error('Unsupported DID method', did);
      return null;
    }

    const services = doc.service || [];
    const pdsService = services.find((s: any) => s.id === '#atproto_pds');

    return pdsService?.serviceEndpoint || null;
  } catch (err) {
    console.error('Failed to get PDS for DID', did, err);
    return null;
  }
}

function didWebToHttps(did: string): string {
  const didParts = did.split(':');
  if (didParts.length < 3 || didParts[0] !== 'did' || didParts[1] !== 'web') {
    throw new Error('Invalid did:web format');
  }

  const domain = didParts[2];
  const pathParts = didParts.slice(3);

  if (pathParts.length === 0) {
    return `https://${domain}/.well-known/did.json`;
  } else {
    const path = pathParts.join('/');
    return `https://${domain}/${path}/did.json`;
  }
}
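
// Illustrative did:web mappings per the branches above (domains/paths are hypothetical):
//   didWebToHttps('did:web:example.com')             -> 'https://example.com/.well-known/did.json'
//   didWebToHttps('did:web:example.com:users:alice') -> 'https://example.com/users/alice/did.json'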

export async function fetchSiteRecord(did: string, rkey: string): Promise<{ record: WispFsRecord; cid: string } | null> {
  try {
    const pdsEndpoint = await getPdsForDid(did);
    if (!pdsEndpoint) return null;

    const url = `${pdsEndpoint}/xrpc/com.atproto.repo.getRecord?repo=${encodeURIComponent(did)}&collection=place.wisp.fs&rkey=${encodeURIComponent(rkey)}`;
    const data = await safeFetchJson(url);

    return {
      record: data.value as WispFsRecord,
      cid: data.cid || ''
    };
  } catch (err) {
    console.error('Failed to fetch site record', did, rkey, err);
    return null;
  }
}

export function extractBlobCid(blobRef: unknown): string | null {
  if (isIpldLink(blobRef)) {
    return blobRef.$link;
  }

  if (isTypedBlobRef(blobRef)) {
    const ref = blobRef.ref;

    const cid = CID.asCID(ref);
    if (cid) {
      return cid.toString();
    }

    if (isIpldLink(ref)) {
      return ref.$link;
    }
  }

  if (isUntypedBlobRef(blobRef)) {
    return blobRef.cid;
  }

  return null;
}
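
// The three blob-ref shapes handled above (CID strings are placeholders):
//   { $link: 'bafkrei...' }                         -> 'bafkrei...'  (bare IPLD link)
//   { ref: CID | { $link: 'bafkrei...' }, ... }     -> 'bafkrei...'  (typed blob ref, e.g. from @atproto/api)
//   { cid: 'bafkrei...', ... }                      -> 'bafkrei...'  (legacy untyped blob ref)
//   anything else                                   -> null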

/**
 * Extract all subfs URIs from a directory tree with their mount paths
 */
function extractSubfsUris(directory: Directory, currentPath: string = ''): Array<{ uri: string; path: string }> {
  const uris: Array<{ uri: string; path: string }> = [];

  for (const entry of directory.entries) {
    const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;

    if ('type' in entry.node) {
      if (entry.node.type === 'subfs') {
        // Subfs node with subject URI
        const subfsNode = entry.node as any;
        if (subfsNode.subject) {
          uris.push({ uri: subfsNode.subject, path: fullPath });
        }
      } else if (entry.node.type === 'directory') {
        // Recursively search subdirectories
        const subUris = extractSubfsUris(entry.node as Directory, fullPath);
        uris.push(...subUris);
      }
    }
  }

  return uris;
}

/**
 * Fetch a subfs record from the PDS
 */
async function fetchSubfsRecord(uri: string, pdsEndpoint: string): Promise<SubfsRecord | null> {
  try {
    // Parse URI: at://did/collection/rkey
    const parts = uri.replace('at://', '').split('/');
    if (parts.length < 3) {
      console.error('Invalid subfs URI:', uri);
      return null;
    }

    const did = parts[0];
    const collection = parts[1];
    const rkey = parts[2];

    // Fetch the record from PDS
    const url = `${pdsEndpoint}/xrpc/com.atproto.repo.getRecord?repo=${encodeURIComponent(did)}&collection=${encodeURIComponent(collection)}&rkey=${encodeURIComponent(rkey)}`;
    const response = await safeFetchJson(url);

    if (!response || !response.value) {
      console.error('Subfs record not found:', uri);
      return null;
    }

    return response.value as SubfsRecord;
  } catch (err) {
    console.error('Failed to fetch subfs record:', uri, err);
    return null;
  }
}
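
// Example AT URI and how the split above reads it (identifier and rkey are hypothetical):
//   'at://did:plc:abc123/place.wisp.subfs/3kabc'
//     did        = 'did:plc:abc123'
//     collection = 'place.wisp.subfs'
//     rkey       = '3kabc'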

/**
 * Replace subfs nodes in a directory tree with their actual content
 * Subfs entries are "merged" - their root entries are hoisted into the parent directory
 */
async function expandSubfsNodes(directory: Directory, pdsEndpoint: string): Promise<Directory> {
  // Extract all subfs URIs
  const subfsUris = extractSubfsUris(directory);

  if (subfsUris.length === 0) {
    // No subfs nodes, return as-is
    return directory;
  }

  console.log(`Found ${subfsUris.length} subfs records, fetching...`);

  // Fetch all subfs records in parallel
  const subfsRecords = await Promise.all(
    subfsUris.map(async ({ uri, path }) => {
      const record = await fetchSubfsRecord(uri, pdsEndpoint);
      return { record, path };
    })
  );

  // Build a map of path -> root entries to merge
  const subfsMap = new Map<string, Entry[]>();
  for (const { record, path } of subfsRecords) {
    if (record && record.root && record.root.entries) {
      subfsMap.set(path, record.root.entries);
    }
  }

  // Replace subfs nodes by merging their root entries into the parent directory
  function replaceSubfsInEntries(entries: Entry[], currentPath: string = ''): Entry[] {
    const result: Entry[] = [];

    for (const entry of entries) {
      const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
      const node = entry.node;

      if ('type' in node && node.type === 'subfs') {
        // Check if this is a flat merge or subdirectory merge (default to flat if not specified)
        const subfsNode = node as any;
        const isFlat = subfsNode.flat !== false; // Default to true
        const subfsEntries = subfsMap.get(fullPath);

        if (subfsEntries) {
          console.log(`Merging subfs node at ${fullPath} (${subfsEntries.length} entries, flat: ${isFlat})`);

          if (isFlat) {
            // Flat merge: hoist entries directly into parent directory
            const processedEntries = replaceSubfsInEntries(subfsEntries, currentPath);
            result.push(...processedEntries);
          } else {
            // Subdirectory merge: create a directory with the subfs node's name
            const processedEntries = replaceSubfsInEntries(subfsEntries, fullPath);
            result.push({
              name: entry.name,
              node: {
                type: 'directory',
                entries: processedEntries
              }
            });
          }
        } else {
          // If fetch failed, skip this entry
          console.warn(`Failed to fetch subfs at ${fullPath}, skipping`);
        }
      } else if ('type' in node && node.type === 'directory' && 'entries' in node) {
        // Recursively process subdirectories
        result.push({
          ...entry,
          node: {
            ...node,
            entries: replaceSubfsInEntries(node.entries, fullPath)
          }
        });
      } else {
        // Regular file entry
        result.push(entry);
      }
    }

    return result;
  }

  return {
    ...directory,
    entries: replaceSubfsInEntries(directory.entries)
  };
}
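
// Illustrative merge behaviour (entry shapes abbreviated; see the place.wisp.subfs lexicon types):
//   { name: 'blog', node: { type: 'subfs', subject: 'at://.../place.wisp.subfs/...' } }
//     flat (default)  -> the subfs root entries are hoisted directly into the parent directory
//   { name: 'blog', node: { type: 'subfs', subject: '...', flat: false } }
//     flat: false     -> the subfs root entries are wrapped in a directory named 'blog'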

export async function downloadAndCacheSite(did: string, rkey: string, record: WispFsRecord, pdsEndpoint: string, recordCid: string): Promise<void> {
  console.log('Caching site', did, rkey);

  if (!record.root) {
    console.error('Record missing root directory:', JSON.stringify(record, null, 2));
    throw new Error('Invalid record structure: missing root directory');
  }

  if (!record.root.entries || !Array.isArray(record.root.entries)) {
    console.error('Record root missing entries array:', JSON.stringify(record.root, null, 2));
    throw new Error('Invalid record structure: root missing entries array');
  }

  // Expand subfs nodes before caching
  const expandedRoot = await expandSubfsNodes(record.root, pdsEndpoint);

  // Get existing cache metadata to check for incremental updates
  const existingMetadata = await getCacheMetadata(did, rkey);
  const existingFileCids = existingMetadata?.fileCids || {};

  // Use a temporary directory with timestamp to avoid collisions
  const tempSuffix = `.tmp-${Date.now()}-${Math.random().toString(36).slice(2, 9)}`;
  const tempDir = `${CACHE_DIR}/${did}/${rkey}${tempSuffix}`;
  const finalDir = `${CACHE_DIR}/${did}/${rkey}`;

  try {
    // Collect file CIDs from the new record (using expanded root)
    const newFileCids: Record<string, string> = {};
    collectFileCidsFromEntries(expandedRoot.entries, '', newFileCids);

    // Download/copy files to temporary directory (with incremental logic, using expanded root)
    await cacheFiles(did, rkey, expandedRoot.entries, pdsEndpoint, '', tempSuffix, existingFileCids, finalDir);
    await saveCacheMetadata(did, rkey, recordCid, tempSuffix, newFileCids);

    // Atomically replace old cache with new cache
    // On POSIX systems (Linux/macOS), rename is atomic
    if (existsSync(finalDir)) {
      // Rename old directory to backup
      const backupDir = `${finalDir}.old-${Date.now()}`;
      await rename(finalDir, backupDir);

      try {
        // Rename new directory to final location
        await rename(tempDir, finalDir);

        // Clean up old backup
        rmSync(backupDir, { recursive: true, force: true });
      } catch (err) {
        // If rename failed, restore backup
        if (existsSync(backupDir) && !existsSync(finalDir)) {
          await rename(backupDir, finalDir);
        }
        throw err;
      }
    } else {
      // No existing cache, just rename temp to final
      await rename(tempDir, finalDir);
    }

    console.log('Successfully cached site atomically', did, rkey);
  } catch (err) {
    // Clean up temp directory on failure
    if (existsSync(tempDir)) {
      rmSync(tempDir, { recursive: true, force: true });
    }
    throw err;
  }
}
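
// Resulting on-disk layout for a cached site (file names are illustrative; CACHE_DIR defaults to ./cache/sites):
//   {CACHE_DIR}/{did}/{rkey}/.metadata.json    - CacheMetadata written by saveCacheMetadata()
//   {CACHE_DIR}/{did}/{rkey}/index.html        - cached file (example name)
//   {CACHE_DIR}/{did}/{rkey}/index.html.meta   - sidecar, only present when the file is stored gzip-compressed
//   {CACHE_DIR}/{did}/{rkey}.tmp-* / .old-*    - transient directories used for the atomic swap above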

/**
 * Recursively collect file CIDs from entries for incremental update tracking
 */
function collectFileCidsFromEntries(entries: Entry[], pathPrefix: string, fileCids: Record<string, string>): void {
  for (const entry of entries) {
    const currentPath = pathPrefix ? `${pathPrefix}/${entry.name}` : entry.name;
    const node = entry.node;

    if ('type' in node && node.type === 'directory' && 'entries' in node) {
      collectFileCidsFromEntries(node.entries, currentPath, fileCids);
    } else if ('type' in node && node.type === 'file' && 'blob' in node) {
      const fileNode = node as File;
      const cid = extractBlobCid(fileNode.blob);
      if (cid) {
        fileCids[currentPath] = cid;
      }
    }
  }
}
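
// The collected map is keyed by site-relative path and valued by blob CID (values are placeholders):
//   { 'index.html': 'bafkrei...', 'assets/app.js': 'bafkrei...' }
// cacheFiles() compares this against CacheMetadata.fileCids from the previous run to decide
// which files can be copied from the old cache instead of re-downloaded.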

async function cacheFiles(
  did: string,
  site: string,
  entries: Entry[],
  pdsEndpoint: string,
  pathPrefix: string,
  dirSuffix: string = '',
  existingFileCids: Record<string, string> = {},
  existingCacheDir?: string
): Promise<void> {
  // Collect file tasks, separating unchanged files from new/changed files
  const downloadTasks: Array<() => Promise<void>> = [];
  const copyTasks: Array<() => Promise<void>> = [];

  function collectFileTasks(
    entries: Entry[],
    currentPathPrefix: string
  ) {
    for (const entry of entries) {
      const currentPath = currentPathPrefix ? `${currentPathPrefix}/${entry.name}` : entry.name;
      const node = entry.node;

      if ('type' in node && node.type === 'directory' && 'entries' in node) {
        collectFileTasks(node.entries, currentPath);
      } else if ('type' in node && node.type === 'file' && 'blob' in node) {
        const fileNode = node as File;
        const cid = extractBlobCid(fileNode.blob);

        // Check if file is unchanged (same CID as existing cache)
        if (cid && existingFileCids[currentPath] === cid && existingCacheDir) {
          // File unchanged - copy from existing cache instead of downloading
          copyTasks.push(() => copyExistingFile(
            did,
            site,
            currentPath,
            dirSuffix,
            existingCacheDir
          ));
        } else {
          // File new or changed - download it
          downloadTasks.push(() => cacheFileBlob(
            did,
            site,
            currentPath,
            fileNode.blob,
            pdsEndpoint,
            fileNode.encoding,
            fileNode.mimeType,
            fileNode.base64,
            dirSuffix
          ));
        }
      }
    }
  }

  collectFileTasks(entries, pathPrefix);

  console.log(`[Incremental Update] Files to copy: ${copyTasks.length}, Files to download: ${downloadTasks.length}`);

  // Copy unchanged files in parallel (fast local operations) - increased limit for better performance
  const copyLimit = 50;
  for (let i = 0; i < copyTasks.length; i += copyLimit) {
    const batch = copyTasks.slice(i, i + copyLimit);
    await Promise.all(batch.map(task => task()));
    if (copyTasks.length > copyLimit) {
      console.log(`[Cache Progress] Copied ${Math.min(i + copyLimit, copyTasks.length)}/${copyTasks.length} unchanged files`);
    }
  }

  // Download new/changed files concurrently - increased from 3 to 20 for much better performance
  const downloadLimit = 20;
  let successCount = 0;
  let failureCount = 0;

  for (let i = 0; i < downloadTasks.length; i += downloadLimit) {
    const batch = downloadTasks.slice(i, i + downloadLimit);
    const results = await Promise.allSettled(batch.map(task => task()));

    // Count successes and failures
    results.forEach((result) => {
      if (result.status === 'fulfilled') {
        successCount++;
      } else {
        failureCount++;
        console.error(`[Cache] Failed to download file (continuing with others):`, result.reason);
      }
    });

    if (downloadTasks.length > downloadLimit) {
      console.log(`[Cache Progress] Downloaded ${Math.min(i + downloadLimit, downloadTasks.length)}/${downloadTasks.length} files (${failureCount} failed)`);
    }
  }

  if (failureCount > 0) {
    console.warn(`[Cache] Completed with ${successCount} successful and ${failureCount} failed file downloads`);
  }
}

/**
 * Copy an unchanged file from existing cache to new cache location
 */
async function copyExistingFile(
  did: string,
  site: string,
  filePath: string,
  dirSuffix: string,
  existingCacheDir: string
): Promise<void> {
  const { copyFile } = await import('fs/promises');

  const sourceFile = `${existingCacheDir}/${filePath}`;
  const destFile = `${CACHE_DIR}/${did}/${site}${dirSuffix}/${filePath}`;
  const destDir = destFile.substring(0, destFile.lastIndexOf('/'));

  // Create destination directory if needed
  if (destDir && !existsSync(destDir)) {
    mkdirSync(destDir, { recursive: true });
  }

  try {
    // Copy the file
    await copyFile(sourceFile, destFile);

    // Copy metadata file if it exists
    const sourceMetaFile = `${sourceFile}.meta`;
    const destMetaFile = `${destFile}.meta`;
    if (existsSync(sourceMetaFile)) {
      await copyFile(sourceMetaFile, destMetaFile);
    }
  } catch (err) {
    console.error(`Failed to copy cached file ${filePath}:`, err);
    throw err;
  }
}

async function cacheFileBlob(
  did: string,
  site: string,
  filePath: string,
  blobRef: any,
  pdsEndpoint: string,
  encoding?: 'gzip',
  mimeType?: string,
  base64?: boolean,
  dirSuffix: string = ''
): Promise<void> {
  const cid = extractBlobCid(blobRef);
  if (!cid) {
    console.error('Could not extract CID from blob', blobRef);
    return;
  }

  const blobUrl = `${pdsEndpoint}/xrpc/com.atproto.sync.getBlob?did=${encodeURIComponent(did)}&cid=${encodeURIComponent(cid)}`;

  console.log(`[Cache] Fetching blob for file: ${filePath}, CID: ${cid}`);

  // Allow up to 500MB per file blob, with 5 minute timeout
  let content = await safeFetchBlob(blobUrl, { maxSize: 500 * 1024 * 1024, timeout: 300000 });

  // If content is base64-encoded, decode it back to raw binary (gzipped or not)
  if (base64) {
    // The blob bytes are base64 text: decode them to a string, then back to raw binary
    const textDecoder = new TextDecoder();
    const base64String = textDecoder.decode(content);
    content = Buffer.from(base64String, 'base64');
  }

  const cacheFile = `${CACHE_DIR}/${did}/${site}${dirSuffix}/${filePath}`;
  const fileDir = cacheFile.substring(0, cacheFile.lastIndexOf('/'));

  if (fileDir && !existsSync(fileDir)) {
    mkdirSync(fileDir, { recursive: true });
  }

  // Use the shared function to determine if this should remain compressed
  const shouldStayCompressed = shouldCompressMimeType(mimeType);

  // Decompress files that shouldn't be stored compressed (0x1f 0x8b is the gzip magic number)
  if (encoding === 'gzip' && !shouldStayCompressed && content.length >= 2 &&
      content[0] === 0x1f && content[1] === 0x8b) {
    try {
      const { gunzipSync } = await import('zlib');
      const decompressed = gunzipSync(content);
      content = decompressed;
      // Clear the encoding flag since we're storing decompressed
      encoding = undefined;
    } catch (error) {
      console.error(`Failed to decompress ${filePath}, storing original gzipped content:`, error);
    }
  }

  await writeFile(cacheFile, content);

  // Store metadata only if file is still compressed
  if (encoding === 'gzip' && mimeType) {
    const metaFile = `${cacheFile}.meta`;
    await writeFile(metaFile, JSON.stringify({ encoding, mimeType }));
    console.log('Cached file', filePath, content.length, 'bytes (gzipped,', mimeType + ')');
  } else {
    console.log('Cached file', filePath, content.length, 'bytes');
  }
}

/**
 * Sanitize a file path to prevent directory traversal attacks
 * Removes any path segments that attempt to go up directories
 */
export function sanitizePath(filePath: string): string {
  // Remove leading slashes
  const cleaned = filePath.replace(/^\/+/, '');

  // Split into segments and filter out dangerous ones
  const segments = cleaned.split('/').filter(segment => {
    // Remove empty segments and current-directory references
    if (!segment || segment === '.') return false;
    // Remove parent directory references
    if (segment === '..') return false;
    // Remove segments with null bytes
    if (segment.includes('\0')) return false;
    return true;
  });

  // Rejoin the safe segments
  return segments.join('/');
}
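
// Illustrative inputs and outputs (paths are hypothetical):
//   sanitizePath('/etc/passwd')       -> 'etc/passwd'  (leading slashes stripped, path made relative)
//   sanitizePath('../../secret.txt')  -> 'secret.txt'  ('..' segments dropped)
//   sanitizePath('a/./b//c')          -> 'a/b/c'       ('.' and empty segments dropped)
//   sanitizePath('a/ev\0il/b')        -> 'a/b'         (segments containing null bytes dropped)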

export function getCachedFilePath(did: string, site: string, filePath: string): string {
  const sanitizedPath = sanitizePath(filePath);
  return `${CACHE_DIR}/${did}/${site}/${sanitizedPath}`;
}

export function isCached(did: string, site: string): boolean {
  return existsSync(`${CACHE_DIR}/${did}/${site}`);
}

async function saveCacheMetadata(did: string, rkey: string, recordCid: string, dirSuffix: string = '', fileCids?: Record<string, string>): Promise<void> {
  const metadata: CacheMetadata = {
    recordCid,
    cachedAt: Date.now(),
    did,
    rkey,
    fileCids
  };

  const metadataPath = `${CACHE_DIR}/${did}/${rkey}${dirSuffix}/.metadata.json`;
  const metadataDir = metadataPath.substring(0, metadataPath.lastIndexOf('/'));

  if (!existsSync(metadataDir)) {
    mkdirSync(metadataDir, { recursive: true });
  }

  await writeFile(metadataPath, JSON.stringify(metadata, null, 2));
}
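
// Shape of the persisted .metadata.json (values are placeholders):
//   {
//     "recordCid": "bafyrei...",
//     "cachedAt": 1700000000000,
//     "did": "did:plc:abc123",
//     "rkey": "self",
//     "fileCids": { "index.html": "bafkrei..." }
//   }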

async function getCacheMetadata(did: string, rkey: string): Promise<CacheMetadata | null> {
  try {
    const metadataPath = `${CACHE_DIR}/${did}/${rkey}/.metadata.json`;
    if (!existsSync(metadataPath)) return null;

    const content = await readFile(metadataPath, 'utf-8');
    return JSON.parse(content) as CacheMetadata;
  } catch (err) {
    console.error('Failed to read cache metadata', err);
    return null;
  }
}

export async function isCacheValid(did: string, rkey: string, currentRecordCid?: string): Promise<boolean> {
  const metadata = await getCacheMetadata(did, rkey);
  if (!metadata) return false;

  // Check if cache has expired (14 days TTL)
  const cacheAge = Date.now() - metadata.cachedAt;
  if (cacheAge > CACHE_TTL) {
    console.log('[Cache] Cache expired for', did, rkey);
    return false;
  }

  // If current CID is provided, verify it matches
  if (currentRecordCid && metadata.recordCid !== currentRecordCid) {
    console.log('[Cache] CID mismatch for', did, rkey, 'cached:', metadata.recordCid, 'current:', currentRecordCid);
    return false;
  }

  return true;
}
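
// Typical flow for serving a site, stitched together from the exports above
// (illustrative sketch; 'alice.example.com', 'self' and 'index.html' are hypothetical):
//
//   const did = await resolveDid('alice.example.com');
//   const pds = did && await getPdsForDid(did);
//   const site = did && await fetchSiteRecord(did, 'self');
//   if (did && pds && site) {
//     if (!(await isCacheValid(did, 'self', site.cid))) {
//       await downloadAndCacheSite(did, 'self', site.record, pds, site.cid);
//     }
//     const filePath = getCachedFilePath(did, 'self', 'index.html'); // serve this from disk
//   }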