Monorepo for wisp.place, a static site hosting service built on top of the AT Protocol.

fix subfs

Changed files
+277 -107
+39 -41
hosting-service/src/lib/utils.ts
···
/**
* Replace subfs nodes in a directory tree with their actual content
+ * Subfs entries are "merged" - their root entries are hoisted into the parent directory
*/
async function expandSubfsNodes(directory: Directory, pdsEndpoint: string): Promise<Directory> {
// Extract all subfs URIs
···
})
);
- // Build a map of path -> directory content
- const subfsMap = new Map<string, Directory>();
+ // Build a map of path -> root entries to merge
+ const subfsMap = new Map<string, Entry[]>();
for (const { record, path } of subfsRecords) {
- if (record && record.root) {
- subfsMap.set(path, record.root);
+ if (record && record.root && record.root.entries) {
+ subfsMap.set(path, record.root.entries);
}
}
- // Replace subfs nodes with their actual content
+ // Replace subfs nodes by merging their root entries into the parent directory
function replaceSubfsInEntries(entries: Entry[], currentPath: string = ''): Entry[] {
- return entries.map(entry => {
+ const result: Entry[] = [];
+
+ for (const entry of entries) {
const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
const node = entry.node;
if ('type' in node && node.type === 'subfs') {
- // Replace with actual directory content
- const subfsDir = subfsMap.get(fullPath);
- if (subfsDir) {
- console.log(`Expanding subfs node at ${fullPath}`);
- return {
- ...entry,
- node: subfsDir
- };
+ // Merge subfs entries into parent directory
+ const subfsEntries = subfsMap.get(fullPath);
+ if (subfsEntries) {
+ console.log(`Merging subfs node at ${fullPath} (${subfsEntries.length} entries)`);
+ // Recursively process the merged entries in case they contain nested subfs
+ const processedEntries = replaceSubfsInEntries(subfsEntries, currentPath);
+ result.push(...processedEntries);
+ } else {
+ // If fetch failed, skip this entry
+ console.warn(`Failed to fetch subfs at ${fullPath}, skipping`);
}
- // If fetch failed, keep the subfs node (will be skipped later)
- return entry;
} else if ('type' in node && node.type === 'directory' && 'entries' in node) {
// Recursively process subdirectories
- return {
+ result.push({
...entry,
node: {
...node,
entries: replaceSubfsInEntries(node.entries, fullPath)
}
- };
+ });
+ } else {
+ // Regular file entry
+ result.push(entry);
}
+ }
- return entry;
- });
+ return result;
}
return {
···
console.log(`[Incremental Update] Files to copy: ${copyTasks.length}, Files to download: ${downloadTasks.length}`);
- // Copy unchanged files in parallel (fast local operations)
- const copyLimit = 10;
+ // Copy unchanged files in parallel (fast local operations) - increased limit for better performance
+ const copyLimit = 50;
for (let i = 0; i < copyTasks.length; i += copyLimit) {
const batch = copyTasks.slice(i, i + copyLimit);
await Promise.all(batch.map(task => task()));
+ if (copyTasks.length > copyLimit) {
+ console.log(`[Cache Progress] Copied ${Math.min(i + copyLimit, copyTasks.length)}/${copyTasks.length} unchanged files`);
+ }
}
- // Download new/changed files concurrently with a limit of 3 at a time
- const downloadLimit = 3;
+ // Download new/changed files concurrently - increased from 3 to 20 for much better performance
+ const downloadLimit = 20;
for (let i = 0; i < downloadTasks.length; i += downloadLimit) {
const batch = downloadTasks.slice(i, i + downloadLimit);
await Promise.all(batch.map(task => task()));
+ if (downloadTasks.length > downloadLimit) {
+ console.log(`[Cache Progress] Downloaded ${Math.min(i + downloadLimit, downloadTasks.length)}/${downloadTasks.length} files`);
+ }
}
}
···
if (existsSync(sourceMetaFile)) {
await copyFile(sourceMetaFile, destMetaFile);
}
-
- console.log(`[Incremental] Copied unchanged file: ${filePath}`);
} catch (err) {
- console.error(`[Incremental] Failed to copy file ${filePath}, will attempt download:`, err);
+ console.error(`Failed to copy cached file ${filePath}, will attempt download:`, err);
throw err;
}
}
···
// Allow up to 500MB per file blob, with 5 minute timeout
let content = await safeFetchBlob(blobUrl, { maxSize: 500 * 1024 * 1024, timeout: 300000 });
- console.log(`[DEBUG] ${filePath}: fetched ${content.length} bytes, base64=${base64}, encoding=${encoding}, mimeType=${mimeType}`);
-
// If content is base64-encoded, decode it back to raw binary (gzipped or not)
if (base64) {
- const originalSize = content.length;
// Decode base64 directly from raw bytes - no string conversion
// The blob contains base64-encoded text as raw bytes, decode it in-place
const textDecoder = new TextDecoder();
const base64String = textDecoder.decode(content);
content = Buffer.from(base64String, 'base64');
- console.log(`[DEBUG] ${filePath}: decoded base64 from ${originalSize} bytes to ${content.length} bytes`);
-
- // Check if it's actually gzipped by looking at magic bytes
- if (content.length >= 2) {
- const hasGzipMagic = content[0] === 0x1f && content[1] === 0x8b;
- console.log(`[DEBUG] ${filePath}: has gzip magic bytes: ${hasGzipMagic}`);
- }
}
const cacheFile = `${CACHE_DIR}/${did}/${site}${dirSuffix}/${filePath}`;
···
const shouldStayCompressed = shouldCompressMimeType(mimeType);
// Decompress files that shouldn't be stored compressed
- if (encoding === 'gzip' && !shouldStayCompressed && content.length >= 2 &&
+ if (encoding === 'gzip' && !shouldStayCompressed && content.length >= 2 &&
content[0] === 0x1f && content[1] === 0x8b) {
- console.log(`[DEBUG] ${filePath}: decompressing non-compressible type (${mimeType}) before caching`);
try {
const { gunzipSync } = await import('zlib');
const decompressed = gunzipSync(content);
- console.log(`[DEBUG] ${filePath}: decompressed from ${content.length} to ${decompressed.length} bytes`);
content = decompressed;
// Clear the encoding flag since we're storing decompressed
encoding = undefined;
} catch (error) {
- console.log(`[DEBUG] ${filePath}: failed to decompress, storing original gzipped content. Error:`, error);
+ console.error(`Failed to decompress ${filePath}, storing original gzipped content:`, error);
}
}
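
The decompress-before-caching branch above keys off the gzip magic bytes (0x1f 0x8b). A minimal standalone sketch of that guard, assuming Node's zlib; the helper name is illustrative and not part of the codebase:

import { gunzipSync } from 'zlib';

// Decompress only when the buffer actually starts with the gzip magic bytes,
// falling back to the original bytes if gunzip fails (as the catch branch above does).
function gunzipIfGzipped(content: Buffer): Buffer {
    const isGzipped = content.length >= 2 && content[0] === 0x1f && content[1] === 0x8b;
    if (!isGzipped) return content;
    try {
        return gunzipSync(content);
    } catch {
        return content;
    }
}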
+1 -1
lexicons/fs.json
···
"required": ["type", "subject"],
"properties": {
"type": { "type": "string", "const": "subfs" },
- "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to a place.wisp.subfs record containing this subtree" }
+ "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to a place.wisp.subfs record containing this subtree. When expanded, the subfs record's root entries are merged (flattened) into the parent directory - the subfs entry itself is removed and replaced by all entries from the referenced record's root. This allows splitting large directories across multiple records while maintaining a flat structure." }
}
}
}
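
To illustrate the merge semantics this description spells out, a hypothetical before/after (the record values are invented; the entry and node shapes follow the ones used elsewhere in this diff):

// Before expansion: the parent place.wisp.fs directory holds a subfs entry.
const parent = {
    $type: 'place.wisp.fs#directory' as const,
    type: 'directory' as const,
    entries: [
        { name: 'index.html', node: { /* file node */ } },
        {
            name: '__subfs_1', // placeholder name, removed when merged
            node: {
                $type: 'place.wisp.fs#subfs' as const,
                type: 'subfs' as const,
                subject: 'at://did:plc:example/place.wisp.subfs/3kexample' // hypothetical AT-URI
            }
        }
    ]
};
// After expansion: the '__subfs_1' entry is gone and the referenced record's
// root entries (e.g. 'about.html', 'blog/', ...) sit directly next to 'index.html'.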
+2 -2
lexicons/subfs.json
···
"defs": {
"main": {
"type": "record",
- "description": "Virtual filesystem manifest within a place.wisp.fs record",
+ "description": "Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.",
"record": {
"type": "object",
"required": ["root", "createdAt"],
···
"required": ["type", "subject"],
"properties": {
"type": { "type": "string", "const": "subfs" },
- "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to another place.wisp.subfs record for nested subtrees" }
+ "subject": { "type": "string", "format": "at-uri", "description": "AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures." }
}
}
}
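
The upload route below builds these records before putting them to the PDS under the place.wisp.subfs collection; a minimal sketch of one such record (values hypothetical):

const subfsManifest = {
    $type: 'place.wisp.subfs' as const,
    root: {
        $type: 'place.wisp.fs#directory' as const,
        type: 'directory' as const,
        entries: [] // up to roughly one chunk (~100 files) of entries
    },
    fileCount: 0,
    createdAt: new Date().toISOString()
};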
+12 -5
src/lib/wisp-utils.ts
···
/**
* Update file blobs in directory structure after upload
* Uses path-based matching to correctly match files in nested directories
+ * Filters out files that were not successfully uploaded
*/
export function updateFileBlobs(
directory: Directory,
uploadResults: FileUploadResult[],
filePaths: string[],
- currentPath: string = ''
+ currentPath: string = '',
+ successfulPaths?: Set<string>
): Directory {
const updatedEntries = directory.entries.map(entry => {
if ('type' in entry.node && entry.node.type === 'file') {
// Build the full path for this file
const fullPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
+
+ // If successfulPaths is provided, skip files that weren't successfully uploaded
+ if (successfulPaths && !successfulPaths.has(fullPath)) {
+ return null; // Filter out failed files
+ }
// Find exact match in filePaths (need to handle normalized paths)
const fileIndex = filePaths.findIndex((path) => {
···
}
};
} else {
- console.error(`❌ BLOB MATCHING ERROR: Could not find blob for file: ${fullPath}`);
- console.error(` Available paths:`, filePaths.slice(0, 10), filePaths.length > 10 ? `... and ${filePaths.length - 10} more` : '');
+ console.error(`Could not find blob for file: ${fullPath}`);
+ return null; // Filter out files without blobs
}
} else if ('type' in entry.node && entry.node.type === 'directory') {
const dirPath = currentPath ? `${currentPath}/${entry.name}` : entry.name;
return {
...entry,
- node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath)
+ node: updateFileBlobs(entry.node as Directory, uploadResults, filePaths, dirPath, successfulPaths)
};
}
return entry;
- }) as Entry[];
+ }).filter(entry => entry !== null) as Entry[]; // Remove null entries (failed files)
const result = {
$type: 'place.wisp.fs#directory' as const,
+223 -58
src/routes/wisp.ts
···
size: number;
}> = [];
+ // Track completed files count for accurate progress
+ let completedFilesCount = 0;
+
// Process file with sliding window concurrency
const processFile = async (file: UploadedFile, index: number) => {
try {
···
if (existingBlob && existingBlob.cid === fileCID) {
logger.info(`[File Upload] ♻️ Reused: ${file.name} (unchanged, CID: ${fileCID})`);
+ const reusedCount = (getUploadJob(jobId)?.progress.filesReused || 0) + 1;
+ completedFilesCount++;
updateJobProgress(jobId, {
- filesReused: (getUploadJob(jobId)?.progress.filesReused || 0) + 1
+ filesReused: reusedCount,
+ currentFile: `${completedFilesCount}/${validUploadedFiles.length}: ${file.name} (reused)`
});
return {
···
);
const returnedBlobRef = uploadResult.data.blob;
+ const uploadedCount = (getUploadJob(jobId)?.progress.filesUploaded || 0) + 1;
+ completedFilesCount++;
updateJobProgress(jobId, {
- filesUploaded: (getUploadJob(jobId)?.progress.filesUploaded || 0) + 1
+ filesUploaded: uploadedCount,
+ currentFile: `${completedFilesCount}/${validUploadedFiles.length}: ${file.name} (uploaded)`
});
logger.info(`[File Upload] ✅ Uploaded: ${file.name} (CID: ${fileCID})`);
···
};
logger.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
console.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
+
+ completedFilesCount++;
+ updateJobProgress(jobId, {
+ currentFile: `${completedFilesCount}/${validUploadedFiles.length}: ${fileName} (failed)`
+ });
// Track failed file but don't throw - continue with other files
failedFiles.push({
···
// Wait for remaining uploads
await Promise.all(executing.keys());
+ console.log(`\n✅ Upload complete: ${completedFilesCount}/${validUploadedFiles.length} files processed\n`);
return results.filter(r => r !== undefined && r !== null); // Filter out null (failed) and undefined entries
};
···
const uploadResults: FileUploadResult[] = uploadedBlobs.map(blob => blob.result);
const filePaths: string[] = uploadedBlobs.map(blob => blob.filePath);
- // Update directory with file blobs
+ // Update directory with file blobs (only for successfully uploaded files)
console.log('Updating directory with blob references...');
updateJobProgress(jobId, { phase: 'creating_manifest' });
- const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths);
+
+ // Create a set of successfully uploaded paths for quick lookup
+ const successfulPaths = new Set(filePaths.map(path => path.replace(/^[^\/]*\//, '')));
+
+ const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths, '', successfulPaths);
+
+ // Calculate actual file count (only successfully uploaded files)
+ const actualFileCount = uploadedBlobs.length;
// Check if we need to split into subfs records
// Split proactively if we have lots of files to avoid hitting manifest size limits
const MAX_MANIFEST_SIZE = 140 * 1024; // 140KB to be safe (PDS limit is 150KB)
- const FILE_COUNT_THRESHOLD = 250; // Start splitting early
+ const FILE_COUNT_THRESHOLD = 250; // Start splitting at this many files
+ const TARGET_FILE_COUNT = 200; // Try to keep main manifest under this many files
const subfsRecords: Array<{ uri: string; path: string }> = [];
let workingDirectory = updatedDirectory;
- let currentFileCount = fileCount;
+ let currentFileCount = actualFileCount;
// Create initial manifest to check size
- let manifest = createManifest(siteName, workingDirectory, fileCount);
+ let manifest = createManifest(siteName, workingDirectory, actualFileCount);
let manifestSize = JSON.stringify(manifest).length;
// Split if we have lots of files OR if manifest is already too large
- if (fileCount >= FILE_COUNT_THRESHOLD || manifestSize > MAX_MANIFEST_SIZE) {
- console.log(`⚠️ Large site detected (${fileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
- logger.info(`Large site with ${fileCount} files, splitting into subfs records`);
+ if (actualFileCount >= FILE_COUNT_THRESHOLD || manifestSize > MAX_MANIFEST_SIZE) {
+ console.log(`⚠️ Large site detected (${actualFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
+ logger.info(`Large site with ${actualFileCount} files, splitting into subfs records`);
- // Keep splitting until manifest fits under limit
+ // Keep splitting until manifest fits under limits (both size and file count)
let attempts = 0;
const MAX_ATTEMPTS = 100; // Allow many splits for very large sites
- while (manifestSize > MAX_MANIFEST_SIZE && attempts < MAX_ATTEMPTS) {
+ while ((manifestSize > MAX_MANIFEST_SIZE || currentFileCount > TARGET_FILE_COUNT) && attempts < MAX_ATTEMPTS) {
attempts++;
// Find all directories sorted by size (largest first)
const directories = findLargeDirectories(workingDirectory);
directories.sort((a, b) => b.size - a.size);
- if (directories.length === 0) {
- // No more directories to split - this should be very rare
- throw new Error(
- `Cannot split manifest further - no subdirectories available. ` +
- `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
- `Try organizing files into subdirectories.`
- );
- }
+ // Check if we can split subdirectories or need to split flat files
+ if (directories.length > 0) {
+ // Split the largest subdirectory
+ const largestDir = directories[0];
+ console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
+
+ // Create a subfs record for this directory
+ const subfsRkey = TID.nextStr();
+ const subfsManifest = {
+ $type: 'place.wisp.subfs' as const,
+ root: largestDir.directory,
+ fileCount: largestDir.fileCount,
+ createdAt: new Date().toISOString()
+ };
+
+ // Validate subfs record
+ const subfsValidation = validateSubfsRecord(subfsManifest);
+ if (!subfsValidation.success) {
+ throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
+ }
+
+ // Upload subfs record to PDS
+ const subfsRecord = await agent.com.atproto.repo.putRecord({
+ repo: did,
+ collection: 'place.wisp.subfs',
+ rkey: subfsRkey,
+ record: subfsManifest
+ });
+
+ const subfsUri = subfsRecord.data.uri;
+ subfsRecords.push({ uri: subfsUri, path: largestDir.path });
+ console.log(` ✅ Created subfs: ${subfsUri}`);
+ logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`);
+
+ // Replace directory with subfs node in the main tree
+ workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
+ currentFileCount -= largestDir.fileCount;
+ } else {
+ // No subdirectories - split flat files at root level
+ const rootFiles = workingDirectory.entries.filter(e => 'type' in e.node && e.node.type === 'file');
- // Pick the largest directory
- const largestDir = directories[0];
- console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
+ if (rootFiles.length === 0) {
+ throw new Error(
+ `Cannot split manifest further - no files or directories available. ` +
+ `Current: ${currentFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB.`
+ );
+ }
- // Create a subfs record for this directory
- const subfsRkey = TID.nextStr();
- const subfsManifest = {
- $type: 'place.wisp.subfs' as const,
- root: largestDir.directory,
- fileCount: largestDir.fileCount,
- createdAt: new Date().toISOString()
- };
+ // Take a chunk of files (aim for ~100 files per chunk)
+ const CHUNK_SIZE = 100;
+ const chunkFiles = rootFiles.slice(0, Math.min(CHUNK_SIZE, rootFiles.length));
+ console.log(` Split #${attempts}: flat root (${chunkFiles.length} files)`);
+
+ // Create a directory with just these files
+ const chunkDirectory: Directory = {
+ $type: 'place.wisp.fs#directory' as const,
+ type: 'directory' as const,
+ entries: chunkFiles
+ };
+
+ // Create subfs record for this chunk
+ const subfsRkey = TID.nextStr();
+ const subfsManifest = {
+ $type: 'place.wisp.subfs' as const,
+ root: chunkDirectory,
+ fileCount: chunkFiles.length,
+ createdAt: new Date().toISOString()
+ };
+
+ // Validate subfs record
+ const subfsValidation = validateSubfsRecord(subfsManifest);
+ if (!subfsValidation.success) {
+ throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
+ }
+
+ // Upload subfs record to PDS
+ const subfsRecord = await agent.com.atproto.repo.putRecord({
+ repo: did,
+ collection: 'place.wisp.subfs',
+ rkey: subfsRkey,
+ record: subfsManifest
+ });
+
+ const subfsUri = subfsRecord.data.uri;
+ console.log(` ✅ Created flat subfs: ${subfsUri}`);
+ logger.info(`Created flat subfs record with ${chunkFiles.length} files: ${subfsUri}`);
- // Validate subfs record
- const subfsValidation = validateSubfsRecord(subfsManifest);
- if (!subfsValidation.success) {
- throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
- }
+ // Remove these files from the working directory and add a subfs entry
+ const remainingEntries = workingDirectory.entries.filter(
+ e => !chunkFiles.some(cf => cf.name === e.name)
+ );
- // Upload subfs record to PDS
- const subfsRecord = await agent.com.atproto.repo.putRecord({
- repo: did,
- collection: 'place.wisp.subfs',
- rkey: subfsRkey,
- record: subfsManifest
- });
+ // Add subfs entry (will be merged flat when expanded)
+ remainingEntries.push({
+ name: `__subfs_${attempts}`, // Placeholder name, will be merged away
+ node: {
+ $type: 'place.wisp.fs#subfs' as const,
+ type: 'subfs' as const,
+ subject: subfsUri
+ }
+ });
- const subfsUri = subfsRecord.data.uri;
- subfsRecords.push({ uri: subfsUri, path: largestDir.path });
- console.log(` ✅ Created subfs: ${subfsUri}`);
- logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`);
+ workingDirectory = {
+ $type: 'place.wisp.fs#directory' as const,
+ type: 'directory' as const,
+ entries: remainingEntries
+ };
- // Replace directory with subfs node in the main tree
- workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
+ subfsRecords.push({ uri: subfsUri, path: `__subfs_${attempts}` });
+ currentFileCount -= chunkFiles.length;
+ }
// Recreate manifest and check new size
- currentFileCount -= largestDir.fileCount;
- manifest = createManifest(siteName, workingDirectory, fileCount);
+ manifest = createManifest(siteName, workingDirectory, currentFileCount);
manifestSize = JSON.stringify(manifest).length;
const newSizeKB = (manifestSize / 1024).toFixed(1);
console.log(` → Manifest now ${newSizeKB}KB with ${currentFileCount} files (${subfsRecords.length} subfs total)`);
- // Check if we're under the limit now
- if (manifestSize <= MAX_MANIFEST_SIZE) {
- console.log(` ✅ Manifest fits! (${newSizeKB}KB < 140KB)`);
+ // Check if we're under both limits now
+ if (manifestSize <= MAX_MANIFEST_SIZE && currentFileCount <= TARGET_FILE_COUNT) {
+ console.log(` ✅ Manifest fits! (${currentFileCount} files, ${newSizeKB}KB)`);
break;
}
}
- if (manifestSize > MAX_MANIFEST_SIZE) {
+ if (manifestSize > MAX_MANIFEST_SIZE || currentFileCount > TARGET_FILE_COUNT) {
throw new Error(
`Failed to fit manifest after splitting ${attempts} directories. ` +
- `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
+ `Current: ${currentFileCount} files, ${(manifestSize / 1024).toFixed(1)}KB. ` +
`This should never happen - please report this issue.`
);
}
···
const fileArray = Array.isArray(files) ? files : [files];
const jobId = createUploadJob(auth.did, siteName, fileArray.length);
- // Create agent with OAuth session
- const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
+ // Track upload speeds to estimate progress
+ const uploadStats = {
+ speeds: [] as number[], // MB/s from completed uploads
+ getAverageSpeed(): number {
+ if (this.speeds.length === 0) return 3; // Default 3 MB/s
+ const sum = this.speeds.reduce((a, b) => a + b, 0);
+ return sum / this.speeds.length;
+ }
+ };
+
+ // Create agent with OAuth session and upload progress monitoring
+ const wrappedFetchHandler = async (url: string, init?: RequestInit) => {
+ // Check if this is an uploadBlob request with a body
+ if (url.includes('uploadBlob') && init?.body) {
+ const originalBody = init.body;
+ const bodySize = originalBody instanceof Uint8Array ? originalBody.length :
+ originalBody instanceof ArrayBuffer ? originalBody.byteLength :
+ typeof originalBody === 'string' ? new TextEncoder().encode(originalBody).length : 0;
+
+ const startTime = Date.now();
+
+ if (bodySize > 10 * 1024 * 1024) { // Files over 10MB
+ const sizeMB = (bodySize / 1024 / 1024).toFixed(1);
+ const avgSpeed = uploadStats.getAverageSpeed();
+ const estimatedDuration = (bodySize / 1024 / 1024) / avgSpeed;
+
+ console.log(`[Upload Progress] Starting upload of ${sizeMB}MB file`);
+ console.log(`[Upload Stats] Measured speeds from last ${uploadStats.speeds.length} files:`, uploadStats.speeds.map(s => s.toFixed(2) + ' MB/s').join(', '));
+ console.log(`[Upload Stats] Average speed: ${avgSpeed.toFixed(2)} MB/s, estimated duration: ${estimatedDuration.toFixed(0)}s`);
+
+ // Log estimated progress every 5 seconds
+ const progressInterval = setInterval(() => {
+ const elapsed = (Date.now() - startTime) / 1000;
+ const estimatedPercent = Math.min(95, Math.round((elapsed / estimatedDuration) * 100));
+ const estimatedMB = Math.min(bodySize / 1024 / 1024, elapsed * avgSpeed).toFixed(1);
+ console.log(`[Upload Progress] ~${estimatedPercent}% (~${estimatedMB}/${sizeMB}MB) - ${elapsed.toFixed(0)}s elapsed`);
+ }, 5000);
+
+ try {
+ const result = await auth.session.fetchHandler(url, init);
+ clearInterval(progressInterval);
+ const totalTime = (Date.now() - startTime) / 1000;
+ const actualSpeed = (bodySize / 1024 / 1024) / totalTime;
+ uploadStats.speeds.push(actualSpeed);
+ // Keep only last 10 uploads for rolling average
+ if (uploadStats.speeds.length > 10) uploadStats.speeds.shift();
+ console.log(`[Upload Progress] ✅ Completed ${sizeMB}MB in ${totalTime.toFixed(1)}s (${actualSpeed.toFixed(1)} MB/s)`);
+ return result;
+ } catch (err) {
+ clearInterval(progressInterval);
+ const elapsed = (Date.now() - startTime) / 1000;
+ console.error(`[Upload Progress] ❌ Upload failed after ${elapsed.toFixed(1)}s`);
+ throw err;
+ }
+ } else {
+ // Track small files too for speed calculation
+ try {
+ const result = await auth.session.fetchHandler(url, init);
+ const totalTime = (Date.now() - startTime) / 1000;
+ if (totalTime > 0.5) { // Only track if > 0.5s
+ const actualSpeed = (bodySize / 1024 / 1024) / totalTime;
+ uploadStats.speeds.push(actualSpeed);
+ if (uploadStats.speeds.length > 10) uploadStats.speeds.shift();
+ console.log(`[Upload Stats] Small file: ${(bodySize / 1024).toFixed(1)}KB in ${totalTime.toFixed(2)}s = ${actualSpeed.toFixed(2)} MB/s`);
+ }
+ return result;
+ } catch (err) {
+ throw err;
+ }
+ }
+ }
+
+ // Normal request
+ return auth.session.fetchHandler(url, init);
+ };
+
+ const agent = new Agent(wrappedFetchHandler)
console.log('Agent created for DID:', auth.did);
console.log('Created upload job:', jobId);
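
As a worked example of the estimation in wrappedFetchHandler above (numbers hypothetical), the rolling average drives both the duration estimate and the periodic log:

const speeds = [2.5, 3.0, 3.5]; // MB/s measured from earlier uploads
const avgSpeed = speeds.reduce((a, b) => a + b, 0) / speeds.length; // 3.0 MB/s
const bodySizeMB = 60; // a 60MB uploadBlob body
const estimatedDuration = bodySizeMB / avgSpeed; // 20 seconds
// The 5-second interval then logs roughly 25%, 50%, 75% and caps at 95%
// until the request resolves and the real speed is pushed into the rolling window.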