hosting-service/src/lib/utils.ts (+39, -41)
 async function expandSubfsNodes(directory: Directory, pdsEndpoint: string): Promise<Directory> {
 ...
 console.log(`[Incremental Update] Files to copy: ${copyTasks.length}, Files to download: ${downloadTasks.length}`);
 ...
-console.log(`[DEBUG] ${filePath}: fetched ${content.length} bytes, base64=${base64}, encoding=${encoding}, mimeType=${mimeType}`);
-console.log(`[DEBUG] ${filePath}: decoded base64 from ${originalSize} bytes to ${content.length} bytes`);
-console.log(`[DEBUG] ${filePath}: decompressing non-compressible type (${mimeType}) before caching`);
-console.log(`[DEBUG] ${filePath}: decompressed from ${content.length} to ${decompressed.length} bytes`);
-console.log(`[DEBUG] ${filePath}: failed to decompress, storing original gzipped content. Error:`, error);
+// Copy unchanged files in parallel (fast local operations) - increased limit for better performance
+console.log(`[Cache Progress] Copied ${Math.min(i + copyLimit, copyTasks.length)}/${copyTasks.length} unchanged files`);
+// Download new/changed files concurrently - increased from 3 to 20 for much better performance
+console.log(`[Cache Progress] Downloaded ${Math.min(i + downloadLimit, downloadTasks.length)}/${downloadTasks.length} files`);
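The `Math.min(i + limit, tasks.length)` pattern in these progress logs points to a chunked `Promise.all` loop. Below is a minimal sketch of that batching pattern; `runInBatches` and the copy limit are assumptions for illustration, not the actual helper in utils.ts (only the download limit of 20 is stated in the diff).

```typescript
// Hypothetical helper illustrating the chunked-parallel pattern implied by
// the [Cache Progress] logs; not the actual code in utils.ts.
async function runInBatches<T>(
  tasks: Array<() => Promise<T>>,
  limit: number,
  label: string
): Promise<T[]> {
  const results: T[] = [];
  for (let i = 0; i < tasks.length; i += limit) {
    // Each slice of `limit` tasks runs fully in parallel; the outer loop
    // provides the back-pressure that caps concurrency.
    const batch = tasks.slice(i, i + limit);
    results.push(...(await Promise.all(batch.map((task) => task()))));
    console.log(`[Cache Progress] ${label} ${Math.min(i + limit, tasks.length)}/${tasks.length} files`);
  }
  return results;
}

// Usage mirroring the diff: cheap local copies get a generous (assumed) limit,
// network downloads are capped at 20 concurrent requests (up from 3).
// await runInBatches(copyTasks, 50, 'Copied');
// await runInBatches(downloadTasks, 20, 'Downloaded');
```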

lexicons/fs.json (+1, -1)
···+"subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to a place.wisp.subfs record containing this subtree. When expanded, the subfs record's root entries are merged (flattened) into the parent directory - the subfs entry itself is removed and replaced by all entries from the referenced record's root. This allows splitting large directories across multiple records while maintaining a flat structure." }

lexicons/subfs.json (+2, -2)
······-"subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to another place.wisp.subfs record for nested subtrees" }
···+"description": "Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.",···+"subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures." }

src/lib/wisp-utils.ts (+12, -5)
 ...
-console.error(` Available paths:`, filePaths.slice(0, 10), filePaths.length > 10 ? `... and ${filePaths.length - 10} more` : '');
 ...
+node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath, successfulPaths)
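The new recursive call threads a `successfulPaths` accumulator through the tree walk. A hedged sketch of that shape, reusing the Directory/Entry types sketched above; parameter meanings are inferred from this one call site, not from the actual implementation:

```typescript
// Inferred shape of the walk; BlobRef stands in for whatever the PDS upload
// returns, and filePaths is kept only for parity with the visible call site.
type BlobRef = unknown;

function updateFileBlobs(
  dir: Directory,
  uploadResults: Map<string, BlobRef>, // assumed: path -> uploaded blob ref
  filePaths: string[],
  parentPath: string,
  successfulPaths: Set<string> // shared accumulator of paths that got a blob
): Directory {
  return {
    ...dir,
    entries: dir.entries.map((entry) => {
      const path = parentPath ? `${parentPath}/${entry.name}` : entry.name;
      if (entry.node.type === 'directory') {
        // Recurse into subdirectories, threading the same success set
        // through, matching the call visible in the diff.
        return { ...entry, node: updateFileBlobs(entry.node, uploadResults, filePaths, path, successfulPaths) };
      }
      const blob = uploadResults.get(path);
      if (entry.node.type === 'file' && blob !== undefined) {
        successfulPaths.add(path);
        return { ...entry, node: { ...entry.node, blob } };
      }
      return entry;
    }),
  };
}
```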

src/routes/wisp.ts (+223, -58)
 logger.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
 console.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
 ...
+console.log(`\n✅ Upload complete: ${completedFilesCount}/${validUploadedFiles.length} files processed\n`);
 return results.filter(r => r !== undefined && r !== null); // Filter out null (failed) and undefined entries
 ...
+const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths, '', successfulPaths);
-console.log(`⚠️ Large site detected (${fileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
+console.log(`⚠️ Large site detected (${actualFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
+while ((manifestSize > MAX_MANIFEST_SIZE || currentFileCount > TARGET_FILE_COUNT) && attempts < MAX_ATTEMPTS) {
-console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
+console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
-throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
+throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
+const rootFiles = workingDirectory.entries.filter(e => 'type' in e.node && e.node.type === 'file');
+throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
 console.log(` → Manifest now ${newSizeKB}KB with ${currentFileCount} files (${subfsRecords.length} subfs total)`);
 ...
+console.log(`[Upload Stats] Measured speeds from last ${uploadStats.speeds.length} files:`, uploadStats.speeds.map(s => s.toFixed(2) + ' MB/s').join(', '));
+console.log(`[Upload Stats] Average speed: ${avgSpeed.toFixed(2)} MB/s, estimated duration: ${estimatedDuration.toFixed(0)}s`);
+console.log(`[Upload Progress] ~${estimatedPercent}% (~${estimatedMB}/${sizeMB}MB) - ${elapsed.toFixed(0)}s elapsed`);
+console.log(`[Upload Progress] ✅ Completed ${sizeMB}MB in ${totalTime.toFixed(1)}s (${actualSpeed.toFixed(1)} MB/s)`);
+console.log(`[Upload Stats] Small file: ${(bodySize / 1024).toFixed(1)}KB in ${totalTime.toFixed(2)}s = ${actualSpeed.toFixed(2)} MB/s`);
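The new `while` loop and the split logs outline the splitting strategy: repeatedly extract the largest directory into its own place.wisp.subfs record until the manifest fits both budgets. A sketch of that loop; the helpers (`findLargestDirectory`, `extractToSubfsRecord`, `measureManifest`) and the constant values are assumptions, as only the loop condition and log format appear in the diff.

```typescript
// Hypothetical helpers standing in for logic the diff only hints at.
declare function findLargestDirectory(dir: Directory): { path: string; fileCount: number; size: number };
declare function extractToSubfsRecord(dir: Directory, path: string): Promise<string>; // returns the new record's AT-URI
declare function measureManifest(dir: Directory): { manifestSize: number; fileCount: number };

async function splitLargeManifest(workingDirectory: Directory): Promise<string[]> {
  const MAX_MANIFEST_SIZE = 100 * 1024; // assumed budget; the real constant is not in the excerpt
  const TARGET_FILE_COUNT = 2000;       // assumed
  const MAX_ATTEMPTS = 20;              // assumed guard so splitting always terminates
  const subfsRecords: string[] = [];

  let { manifestSize, fileCount: currentFileCount } = measureManifest(workingDirectory);
  let attempts = 0;
  // Loop condition copied from the diff: keep splitting while either budget is exceeded.
  while ((manifestSize > MAX_MANIFEST_SIZE || currentFileCount > TARGET_FILE_COUNT) && attempts < MAX_ATTEMPTS) {
    attempts++;
    // Greedy choice: the heaviest directory buys the most headroom per split.
    const largestDir = findLargestDirectory(workingDirectory);
    console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
    subfsRecords.push(await extractToSubfsRecord(workingDirectory, largestDir.path));
    ({ manifestSize, fileCount: currentFileCount } = measureManifest(workingDirectory));
  }
  return subfsRecords;
}
```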
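The `[Upload Stats]` lines suggest a rolling-window throughput estimator for large uploads: keep the speeds of the most recent files, average them, and extrapolate percent complete for the in-flight transfer. A minimal sketch, assuming the `uploadStats` shape and window size (both inferred from the log output alone):

```typescript
// Rolling-window throughput estimator inferred from the [Upload Stats] logs.
const uploadStats = { speeds: [] as number[] }; // MB/s, most recent last
const SPEED_WINDOW = 10; // assumed number of samples to keep

function recordUploadSpeed(bytes: number, seconds: number): void {
  uploadStats.speeds.push(bytes / (1024 * 1024) / seconds);
  if (uploadStats.speeds.length > SPEED_WINDOW) uploadStats.speeds.shift();
}

function logEstimatedProgress(totalBytes: number, elapsed: number): void {
  if (uploadStats.speeds.length === 0) return;
  const avgSpeed = uploadStats.speeds.reduce((a, b) => a + b, 0) / uploadStats.speeds.length;
  const totalMB = totalBytes / (1024 * 1024);
  const sizeMB = totalMB.toFixed(1);
  const estimatedDuration = totalMB / avgSpeed;
  // Extrapolate from average speed, clamped so the estimate never exceeds 100%.
  const doneMB = Math.min(avgSpeed * elapsed, totalMB);
  const estimatedPercent = Math.round((doneMB / totalMB) * 100);
  const estimatedMB = doneMB.toFixed(1);
  console.log(`[Upload Stats] Average speed: ${avgSpeed.toFixed(2)} MB/s, estimated duration: ${estimatedDuration.toFixed(0)}s`);
  console.log(`[Upload Progress] ~${estimatedPercent}% (~${estimatedMB}/${sizeMB}MB) - ${elapsed.toFixed(0)}s elapsed`);
}
```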