Monorepo for wisp.place, a static site hosting service built on top of the AT Protocol.

implement automatic subfs splitting for large sites

- Splits manifests larger than 140KB, or sites with 250+ files, into subfs records
- Creates each subfs record with a TID-based rkey for uniqueness
- Splits the largest directories first until the manifest fits under the size limit (see the sketch after this list)
- Fetches and merges blob maps from all subfs records so unchanged blobs can be reused
- Deletes old subfs records after a successful upload
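
To make the splitting concrete, here is a condensed sketch of the loop added in src/routes/wisp.ts below. The function name is hypothetical; it assumes an authenticated `agent` and the repo `did` are in scope, uses the helper names from wisp-utils as they appear in the diff, and omits the attempt limit, record validation, and logging of the real code:

```ts
const MAX_MANIFEST_SIZE = 140 * 1024  // stay safely under the PDS record size limit
const FILE_COUNT_THRESHOLD = 250      // split proactively for large sites

async function buildManifestWithSubfs(siteName: string, root: any, fileCount: number) {
  let manifest = createManifest(siteName, root, fileCount)
  if (fileCount < FILE_COUNT_THRESHOLD && JSON.stringify(manifest).length <= MAX_MANIFEST_SIZE) {
    return manifest  // small site: a single place.wisp.fs record is enough
  }
  while (JSON.stringify(manifest).length > MAX_MANIFEST_SIZE) {
    // Move the largest remaining directory into its own place.wisp.subfs record
    const [largest] = findLargeDirectories(root).sort((a, b) => b.size - a.size)
    if (!largest) throw new Error('No subdirectories left to split')
    const subfs = await agent.com.atproto.repo.putRecord({
      repo: did,
      collection: 'place.wisp.subfs',
      rkey: TID.nextStr(),  // TID-based rkey keeps each subfs record unique
      record: {
        $type: 'place.wisp.subfs',
        root: largest.directory,
        fileCount: largest.fileCount,
        createdAt: new Date().toISOString()
      }
    })
    // Replace that directory in the main tree with a reference to the subfs record,
    // then re-measure the manifest
    root = replaceDirectoryWithSubfs(root, largest.path, subfs.data.uri)
    manifest = createManifest(siteName, root, fileCount)
  }
  return manifest
}
```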

Upload flow (a minimal client-side sketch follows the list):
1. Fetch the existing main record plus all subfs records
2. Build a combined blob map for incremental updates
3. Upload blobs (reusing unchanged blobs across all records)
4. Split directories into subfs records if needed (auto-detected)
5. Upload the main manifest with subfs references
6. Clean up orphaned subfs records from the previous version
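
On the client side the flow is now: POST the files, get back a `jobId`, then follow progress over SSE. A minimal sketch, with the endpoints and event names taken from the diffs below; the multipart field names are assumed from the route handler, and UI state handling is omitted:

```ts
async function uploadSite(siteName: string, files: File[]) {
  const formData = new FormData()
  formData.append('siteName', siteName)   // field names assumed from the route handler
  for (const file of files) formData.append('files', file)

  // The route returns immediately with a job ID instead of the final result
  const res = await fetch('/wisp/upload-files', { method: 'POST', body: formData })
  const data = await res.json()
  if (!data.success || !data.jobId) throw new Error(data.error || 'Failed to start upload')

  // Follow server-sent events: 'progress' during the upload, then 'done' or 'error'
  const source = new EventSource(`/wisp/upload-progress/${data.jobId}`)
  source.addEventListener('progress', (event) => {
    const { progress } = JSON.parse((event as MessageEvent).data)
    console.log(`${progress.phase}: ${(progress.filesUploaded ?? 0) + (progress.filesReused ?? 0)}/${progress.totalFiles}`)
  })
  source.addEventListener('done', (event) => {
    console.log('Upload complete', JSON.parse((event as MessageEvent).data))
    source.close()
  })
  source.addEventListener('error', () => source.close())
}
```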

Changed files (+1013 -327)
public/editor/tabs/UploadTab.tsx (+141 -22)
···
-import { useState, useEffect } from 'react'
+import { useState, useEffect, useRef } from 'react'
import {
Card,
CardContent,
···
const [skippedFiles, setSkippedFiles] = useState<Array<{ name: string; reason: string }>>([])
const [uploadedCount, setUploadedCount] = useState(0)
+
// Keep SSE connection alive across tab switches
+
const eventSourceRef = useRef<EventSource | null>(null)
+
const currentJobIdRef = useRef<string | null>(null)
+
// Auto-switch to 'new' mode if no sites exist
useEffect(() => {
if (!sitesLoading && sites.length === 0 && siteMode === 'existing') {
···
}
}, [sites, sitesLoading, siteMode])
+
// Cleanup SSE connection on unmount
+
useEffect(() => {
+
return () => {
+
// Don't close the connection on unmount (tab switch)
+
// It will be reused when the component remounts
+
}
+
}, [])
+
const handleFileSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
if (e.target.files && e.target.files.length > 0) {
setSelectedFiles(e.target.files)
}
}
+
const setupSSE = (jobId: string) => {
+
// Close existing connection if any
+
if (eventSourceRef.current) {
+
eventSourceRef.current.close()
+
}
+
+
currentJobIdRef.current = jobId
+
const eventSource = new EventSource(`/wisp/upload-progress/${jobId}`)
+
eventSourceRef.current = eventSource
+
+
eventSource.addEventListener('progress', (event) => {
+
const progressData = JSON.parse(event.data)
+
const { progress, status } = progressData
+
+
// Update progress message based on phase
+
let message = 'Processing...'
+
if (progress.phase === 'validating') {
+
message = 'Validating files...'
+
} else if (progress.phase === 'compressing') {
+
const current = progress.filesProcessed || 0
+
const total = progress.totalFiles || 0
+
message = `Compressing files (${current}/${total})...`
+
if (progress.currentFile) {
+
message += ` - ${progress.currentFile}`
+
}
+
} else if (progress.phase === 'uploading') {
+
const uploaded = progress.filesUploaded || 0
+
const reused = progress.filesReused || 0
+
const total = progress.totalFiles || 0
+
message = `Uploading to PDS (${uploaded + reused}/${total})...`
+
} else if (progress.phase === 'creating_manifest') {
+
message = 'Creating manifest...'
+
} else if (progress.phase === 'finalizing') {
+
message = 'Finalizing upload...'
+
}
+
+
setUploadProgress(message)
+
})
+
+
eventSource.addEventListener('done', (event) => {
+
const result = JSON.parse(event.data)
+
eventSource.close()
+
eventSourceRef.current = null
+
currentJobIdRef.current = null
+
+
setUploadProgress('Upload complete!')
+
setSkippedFiles(result.skippedFiles || [])
+
setUploadedCount(result.uploadedCount || result.fileCount || 0)
+
setSelectedSiteRkey('')
+
setNewSiteName('')
+
setSelectedFiles(null)
+
+
// Refresh sites list
+
onUploadComplete()
+
+
// Reset form
+
const resetDelay = result.skippedFiles && result.skippedFiles.length > 0 ? 4000 : 1500
+
setTimeout(() => {
+
setUploadProgress('')
+
setSkippedFiles([])
+
setUploadedCount(0)
+
setIsUploading(false)
+
}, resetDelay)
+
})
+
+
eventSource.addEventListener('error', (event) => {
+
const errorData = JSON.parse((event as any).data || '{}')
+
eventSource.close()
+
eventSourceRef.current = null
+
currentJobIdRef.current = null
+
+
console.error('Upload error:', errorData)
+
alert(
+
`Upload failed: ${errorData.error || 'Unknown error'}`
+
)
+
setIsUploading(false)
+
setUploadProgress('')
+
})
+
+
eventSource.onerror = () => {
+
eventSource.close()
+
eventSourceRef.current = null
+
currentJobIdRef.current = null
+
+
console.error('SSE connection error')
+
alert('Lost connection to upload progress. The upload may still be processing.')
+
setIsUploading(false)
+
setUploadProgress('')
+
}
+
}
+
const handleUpload = async () => {
const siteName = siteMode === 'existing' ? selectedSiteRkey : newSiteName
···
}
}
-setUploadProgress('Uploading to AT Protocol...')
+
// If no files, handle synchronously (old behavior)
+
if (!selectedFiles || selectedFiles.length === 0) {
+
setUploadProgress('Creating empty site...')
+
const response = await fetch('/wisp/upload-files', {
+
method: 'POST',
+
body: formData
+
})
+
+
const data = await response.json()
+
if (data.success) {
+
setUploadProgress('Site created!')
+
setSelectedSiteRkey('')
+
setNewSiteName('')
+
setSelectedFiles(null)
+
+
await onUploadComplete()
+
+
setTimeout(() => {
+
setUploadProgress('')
+
setIsUploading(false)
+
}, 1500)
+
} else {
+
throw new Error(data.error || 'Upload failed')
+
}
+
return
+
}
+
+
// For file uploads, use SSE for progress
+
setUploadProgress('Starting upload...')
const response = await fetch('/wisp/upload-files', {
method: 'POST',
body: formData
})
const data = await response.json()
-if (data.success) {
-setUploadProgress('Upload complete!')
-setSkippedFiles(data.skippedFiles || [])
-setUploadedCount(data.uploadedCount || data.fileCount || 0)
-setSelectedSiteRkey('')
-setNewSiteName('')
-setSelectedFiles(null)
+if (!data.success || !data.jobId) {
+throw new Error(data.error || 'Failed to start upload')
+}
-// Refresh sites list
-await onUploadComplete()
+const jobId = data.jobId
+setUploadProgress('Connecting to progress stream...')
-// Reset form - give more time if there are skipped files
-const resetDelay = data.skippedFiles && data.skippedFiles.length > 0 ? 4000 : 1500
-setTimeout(() => {
-setUploadProgress('')
-setSkippedFiles([])
-setUploadedCount(0)
-setIsUploading(false)
-}, resetDelay)
-} else {
-throw new Error(data.error || 'Upload failed')
-}
+// Setup SSE connection (persists across tab switches via ref)
+setupSSE(jobId)
+
} catch (err) {
console.error('Upload error:', err)
alert(
src/lib/upload-jobs.ts (+197)
···
+
import { logger } from './observability';
+
+
export type UploadJobStatus = 'pending' | 'processing' | 'uploading' | 'completed' | 'failed';
+
+
export interface UploadProgress {
+
filesProcessed: number;
+
totalFiles: number;
+
filesUploaded: number;
+
filesReused: number;
+
currentFile?: string;
+
phase: 'validating' | 'compressing' | 'uploading' | 'creating_manifest' | 'finalizing' | 'done';
+
}
+
+
export interface UploadJob {
+
id: string;
+
did: string;
+
siteName: string;
+
status: UploadJobStatus;
+
progress: UploadProgress;
+
result?: {
+
success: boolean;
+
uri?: string;
+
cid?: string;
+
fileCount?: number;
+
siteName?: string;
+
skippedFiles?: Array<{ name: string; reason: string }>;
+
uploadedCount?: number;
+
};
+
error?: string;
+
createdAt: number;
+
updatedAt: number;
+
}
+
+
// In-memory job storage
+
const jobs = new Map<string, UploadJob>();
+
+
// SSE connections for each job
+
const jobListeners = new Map<string, Set<(event: string, data: any) => void>>();
+
+
// Cleanup old jobs after 1 hour
+
const JOB_TTL = 60 * 60 * 1000;
+
+
export function createUploadJob(did: string, siteName: string, totalFiles: number): string {
+
const id = crypto.randomUUID();
+
const now = Date.now();
+
+
const job: UploadJob = {
+
id,
+
did,
+
siteName,
+
status: 'pending',
+
progress: {
+
filesProcessed: 0,
+
totalFiles,
+
filesUploaded: 0,
+
filesReused: 0,
+
phase: 'validating'
+
},
+
createdAt: now,
+
updatedAt: now
+
};
+
+
jobs.set(id, job);
+
logger.info(`Upload job created: ${id} for ${did}/${siteName} (${totalFiles} files)`);
+
+
// Schedule cleanup
+
setTimeout(() => {
+
jobs.delete(id);
+
jobListeners.delete(id);
+
logger.info(`Upload job cleaned up: ${id}`);
+
}, JOB_TTL);
+
+
return id;
+
}
+
+
export function getUploadJob(id: string): UploadJob | undefined {
+
return jobs.get(id);
+
}
+
+
export function updateUploadJob(
+
id: string,
+
updates: Partial<Omit<UploadJob, 'id' | 'did' | 'siteName' | 'createdAt'>>
+
): void {
+
const job = jobs.get(id);
+
if (!job) {
+
logger.warn(`Attempted to update non-existent job: ${id}`);
+
return;
+
}
+
+
Object.assign(job, updates, { updatedAt: Date.now() });
+
jobs.set(id, job);
+
+
// Notify all listeners
+
const listeners = jobListeners.get(id);
+
if (listeners && listeners.size > 0) {
+
const eventData = {
+
status: job.status,
+
progress: job.progress,
+
result: job.result,
+
error: job.error
+
};
+
+
const failedListeners: Array<(event: string, data: any) => void> = [];
+
listeners.forEach(listener => {
+
try {
+
listener('progress', eventData);
+
} catch (err) {
+
// Client disconnected, remove this listener
+
failedListeners.push(listener);
+
}
+
});
+
+
// Remove failed listeners
+
failedListeners.forEach(listener => listeners.delete(listener));
+
}
+
}
+
+
export function completeUploadJob(id: string, result: UploadJob['result']): void {
+
updateUploadJob(id, {
+
status: 'completed',
+
progress: {
+
...getUploadJob(id)!.progress,
+
phase: 'done'
+
},
+
result
+
});
+
+
// Send final event and close connections
+
setTimeout(() => {
+
const listeners = jobListeners.get(id);
+
if (listeners) {
+
listeners.forEach(listener => {
+
try {
+
listener('done', result);
+
} catch (err) {
+
// Client already disconnected, ignore
+
}
+
});
+
jobListeners.delete(id);
+
}
+
}, 100);
+
}
+
+
export function failUploadJob(id: string, error: string): void {
+
updateUploadJob(id, {
+
status: 'failed',
+
error
+
});
+
+
// Send error event and close connections
+
setTimeout(() => {
+
const listeners = jobListeners.get(id);
+
if (listeners) {
+
listeners.forEach(listener => {
+
try {
+
listener('error', { error });
+
} catch (err) {
+
// Client already disconnected, ignore
+
}
+
});
+
jobListeners.delete(id);
+
}
+
}, 100);
+
}
+
+
export function addJobListener(jobId: string, listener: (event: string, data: any) => void): () => void {
+
if (!jobListeners.has(jobId)) {
+
jobListeners.set(jobId, new Set());
+
}
+
jobListeners.get(jobId)!.add(listener);
+
+
// Return cleanup function
+
return () => {
+
const listeners = jobListeners.get(jobId);
+
if (listeners) {
+
listeners.delete(listener);
+
if (listeners.size === 0) {
+
jobListeners.delete(jobId);
+
}
+
}
+
};
+
}
+
+
export function updateJobProgress(
+
jobId: string,
+
progressUpdate: Partial<UploadProgress>
+
): void {
+
const job = getUploadJob(jobId);
+
if (!job) return;
+
+
updateUploadJob(jobId, {
+
progress: {
+
...job.progress,
+
...progressUpdate
+
}
+
});
+
}
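
Taken together, the new module is used roughly like this (illustrative sketch only; the DID and site name are placeholders, the import path is as seen from src/routes, and the real caller is the /wisp/upload-files route below):

```ts
import {
  createUploadJob,
  updateJobProgress,
  completeUploadJob,
  addJobListener
} from '../lib/upload-jobs'

// The upload route creates a job before kicking off background processing
const jobId = createUploadJob('did:plc:example', 'my-site', 3)  // placeholder DID and site name

// The SSE route subscribes to updates and keeps the returned cleanup function
const unsubscribe = addJobListener(jobId, (event, data) => {
  console.log(event, data)  // 'progress', then 'done' or 'error'
})

// The background worker reports progress as it moves through phases...
updateJobProgress(jobId, { phase: 'uploading', filesUploaded: 2, filesReused: 1 })

// ...and completes the job, which notifies listeners with a 'done' event
completeUploadJob(jobId, { success: true, fileCount: 3, siteName: 'my-site' })

unsubscribe()
```
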
src/routes/wisp.ts (+675 -305)
···
import { requireAuth, type AuthenticatedContext } from '../lib/wisp-auth'
import { NodeOAuthClient } from '@atproto/oauth-client-node'
import { Agent } from '@atproto/api'
+
import { TID } from '@atproto/common-web'
import {
type UploadedFile,
type FileUploadResult,
···
shouldCompressFile,
compressFile,
computeCID,
-extractBlobMap
+extractBlobMap,
+extractSubfsUris,
+findLargeDirectories,
+replaceDirectoryWithSubfs,
+estimateDirectorySize
} from '../lib/wisp-utils'
import { upsertSite } from '../lib/db'
import { logger } from '../lib/observability'
import { validateRecord } from '../lexicons/types/place/wisp/fs'
+
import { validateRecord as validateSubfsRecord } from '../lexicons/types/place/wisp/subfs'
import { MAX_SITE_SIZE, MAX_FILE_SIZE, MAX_FILE_COUNT } from '../lib/constants'
+
import {
+
createUploadJob,
+
getUploadJob,
+
updateJobProgress,
+
completeUploadJob,
+
failUploadJob,
+
addJobListener
+
} from '../lib/upload-jobs'
function isValidSiteName(siteName: string): boolean {
if (!siteName || typeof siteName !== 'string') return false;
···
return true;
}
+
async function processUploadInBackground(
+
jobId: string,
+
agent: Agent,
+
did: string,
+
siteName: string,
+
fileArray: File[]
+
): Promise<void> {
+
try {
+
// Try to fetch existing record to enable incremental updates
+
let existingBlobMap = new Map<string, { blobRef: any; cid: string }>();
+
let oldSubfsUris: Array<{ uri: string; path: string }> = [];
+
console.log('Attempting to fetch existing record...');
+
updateJobProgress(jobId, { phase: 'validating' });
+
+
try {
+
const rkey = siteName;
+
const existingRecord = await agent.com.atproto.repo.getRecord({
+
repo: did,
+
collection: 'place.wisp.fs',
+
rkey: rkey
+
});
+
console.log('Existing record found!');
+
+
if (existingRecord.data.value && typeof existingRecord.data.value === 'object' && 'root' in existingRecord.data.value) {
+
const manifest = existingRecord.data.value as any;
+
+
// Extract blob map from main record
+
existingBlobMap = extractBlobMap(manifest.root);
+
console.log(`Found existing manifest with ${existingBlobMap.size} files in main record`);
+
+
// Extract subfs URIs with their mount paths from main record
+
const subfsUris = extractSubfsUris(manifest.root);
+
oldSubfsUris = subfsUris; // Save for cleanup later
+
+
if (subfsUris.length > 0) {
+
console.log(`Found ${subfsUris.length} subfs records, fetching in parallel...`);
+
logger.info(`Fetching ${subfsUris.length} subfs records for blob reuse`);
+
+
// Fetch all subfs records in parallel
+
const subfsRecords = await Promise.all(
+
subfsUris.map(async ({ uri, path }) => {
+
try {
+
// Parse URI: at://did/collection/rkey
+
const parts = uri.replace('at://', '').split('/');
+
const subDid = parts[0];
+
const collection = parts[1];
+
const subRkey = parts[2];
+
+
const record = await agent.com.atproto.repo.getRecord({
+
repo: subDid,
+
collection: collection,
+
rkey: subRkey
+
});
+
+
return { record: record.data.value as any, mountPath: path };
+
} catch (err: any) {
+
logger.warn(`Failed to fetch subfs record ${uri}: ${err?.message}`, err);
+
return null;
+
}
+
})
+
);
+
+
// Merge blob maps from all subfs records
+
let totalSubfsBlobs = 0;
+
for (const subfsData of subfsRecords) {
+
if (subfsData && subfsData.record && 'root' in subfsData.record) {
+
// Extract blobs with the correct mount path prefix
+
const subfsMap = extractBlobMap(subfsData.record.root, subfsData.mountPath);
+
subfsMap.forEach((value, key) => {
+
existingBlobMap.set(key, value);
+
totalSubfsBlobs++;
+
});
+
}
+
}
+
+
console.log(`Merged ${totalSubfsBlobs} files from ${subfsUris.length} subfs records`);
+
logger.info(`Total blob map: ${existingBlobMap.size} files (main + subfs)`);
+
}
+
+
console.log(`Total existing blobs for reuse: ${existingBlobMap.size} files`);
+
logger.info(`Found existing manifest with ${existingBlobMap.size} files for incremental update`);
+
}
+
} catch (error: any) {
+
console.log('No existing record found or error:', error?.message || error);
+
if (error?.status !== 400 && error?.error !== 'RecordNotFound') {
+
logger.warn('Failed to fetch existing record, proceeding with full upload', error);
+
}
+
}
+
+
// Convert File objects to UploadedFile format
+
const uploadedFiles: UploadedFile[] = [];
+
const skippedFiles: Array<{ name: string; reason: string }> = [];
+
+
console.log('Processing files, count:', fileArray.length);
+
updateJobProgress(jobId, { phase: 'compressing' });
+
+
for (let i = 0; i < fileArray.length; i++) {
+
const file = fileArray[i];
+
console.log(`Processing file ${i + 1}/${fileArray.length}:`, file.name, file.size, 'bytes');
+
updateJobProgress(jobId, {
+
filesProcessed: i + 1,
+
currentFile: file.name
+
});
+
+
// Skip .git directory files
+
const normalizedPath = file.name.replace(/^[^\/]*\//, '');
+
if (normalizedPath.startsWith('.git/') || normalizedPath === '.git') {
+
console.log(`Skipping .git file: ${file.name}`);
+
skippedFiles.push({
+
name: file.name,
+
reason: '.git directory excluded'
+
});
+
continue;
+
}
+
+
// Skip files that are too large
+
const maxSize = MAX_FILE_SIZE;
+
if (file.size > maxSize) {
+
skippedFiles.push({
+
name: file.name,
+
reason: `file too large (${(file.size / 1024 / 1024).toFixed(2)}MB, max 100MB)`
+
});
+
continue;
+
}
+
+
const arrayBuffer = await file.arrayBuffer();
+
const originalContent = Buffer.from(arrayBuffer);
+
const originalMimeType = file.type || 'application/octet-stream';
+
+
// Compress and base64 encode ALL files
+
const compressedContent = compressFile(originalContent);
+
const base64Content = Buffer.from(compressedContent.toString('base64'), 'binary');
+
const compressionRatio = (compressedContent.length / originalContent.length * 100).toFixed(1);
+
console.log(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${base64Content.length} bytes`);
+
logger.info(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${base64Content.length} bytes`);
+
+
uploadedFiles.push({
+
name: file.name,
+
content: base64Content,
+
mimeType: originalMimeType,
+
size: base64Content.length,
+
compressed: true,
+
originalMimeType
+
});
+
}
+
+
// Update total file count after filtering (important for progress tracking)
+
updateJobProgress(jobId, {
+
totalFiles: uploadedFiles.length
+
});
+
+
// Check total size limit
+
const totalSize = uploadedFiles.reduce((sum, file) => sum + file.size, 0);
+
const maxTotalSize = MAX_SITE_SIZE;
+
+
if (totalSize > maxTotalSize) {
+
throw new Error(`Total upload size ${(totalSize / 1024 / 1024).toFixed(2)}MB exceeds 300MB limit`);
+
}
+
+
// Check file count limit
+
if (uploadedFiles.length > MAX_FILE_COUNT) {
+
throw new Error(`File count ${uploadedFiles.length} exceeds ${MAX_FILE_COUNT} files limit`);
+
}
+
+
console.log(`After filtering: ${uploadedFiles.length} files to process (${skippedFiles.length} skipped)`);
+
+
if (uploadedFiles.length === 0) {
+
// Create empty manifest
+
const emptyManifest = {
+
$type: 'place.wisp.fs',
+
site: siteName,
+
root: {
+
type: 'directory',
+
entries: []
+
},
+
fileCount: 0,
+
createdAt: new Date().toISOString()
+
};
+
+
const validationResult = validateRecord(emptyManifest);
+
if (!validationResult.success) {
+
throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
+
}
+
+
const rkey = siteName;
+
updateJobProgress(jobId, { phase: 'finalizing' });
+
+
const record = await agent.com.atproto.repo.putRecord({
+
repo: did,
+
collection: 'place.wisp.fs',
+
rkey: rkey,
+
record: emptyManifest
+
});
+
+
await upsertSite(did, rkey, siteName);
+
+
completeUploadJob(jobId, {
+
success: true,
+
uri: record.data.uri,
+
cid: record.data.cid,
+
fileCount: 0,
+
siteName,
+
skippedFiles
+
});
+
return;
+
}
+
+
// Process files into directory structure
+
console.log('Processing uploaded files into directory structure...');
+
const validUploadedFiles = uploadedFiles.filter((f, i) => {
+
if (!f || !f.name || !f.content) {
+
console.error(`Filtering out invalid file at index ${i}`);
+
return false;
+
}
+
return true;
+
});
+
+
const { directory, fileCount } = processUploadedFiles(validUploadedFiles);
+
console.log('Directory structure created, file count:', fileCount);
+
+
// Upload files as blobs with retry logic for DPoP nonce conflicts
+
console.log('Starting blob upload/reuse phase...');
+
updateJobProgress(jobId, { phase: 'uploading' });
+
+
// Helper function to upload blob with exponential backoff retry
+
const uploadBlobWithRetry = async (
+
agent: Agent,
+
content: Buffer,
+
mimeType: string,
+
fileName: string,
+
maxRetries = 3
+
) => {
+
for (let attempt = 0; attempt < maxRetries; attempt++) {
+
try {
+
return await agent.com.atproto.repo.uploadBlob(content, { encoding: mimeType });
+
} catch (error: any) {
+
const isDPoPNonceError =
+
error?.message?.toLowerCase().includes('nonce') ||
+
error?.message?.toLowerCase().includes('dpop') ||
+
error?.status === 409;
+
+
if (isDPoPNonceError && attempt < maxRetries - 1) {
+
const backoffMs = 100 * Math.pow(2, attempt); // 100ms, 200ms, 400ms
+
logger.info(`[File Upload] 🔄 DPoP nonce conflict for ${fileName}, retrying in ${backoffMs}ms (attempt ${attempt + 1}/${maxRetries})`);
+
await new Promise(resolve => setTimeout(resolve, backoffMs));
+
continue;
+
}
+
throw error;
+
}
+
}
+
throw new Error(`Failed to upload ${fileName} after ${maxRetries} attempts`);
+
};
+
+
// Use sliding window concurrency for maximum throughput
+
const CONCURRENCY_LIMIT = 50; // Maximum concurrent uploads with retry logic
+
const uploadedBlobs: Array<{
+
result: FileUploadResult;
+
filePath: string;
+
sentMimeType: string;
+
returnedMimeType: string;
+
reused: boolean;
+
}> = [];
+
+
// Process file with sliding window concurrency
+
const processFile = async (file: UploadedFile, index: number) => {
+
try {
+
if (!file || !file.name) {
+
throw new Error(`Undefined file at index ${index}`);
+
}
+
+
const fileCID = computeCID(file.content);
+
const normalizedPath = file.name.replace(/^[^\/]*\//, '');
+
const existingBlob = existingBlobMap.get(normalizedPath) || existingBlobMap.get(file.name);
+
+
if (existingBlob && existingBlob.cid === fileCID) {
+
logger.info(`[File Upload] ♻️ Reused: ${file.name} (unchanged, CID: ${fileCID})`);
+
updateJobProgress(jobId, { filesReused: (getUploadJob(jobId)?.progress.filesReused || 0) + 1 });
+
+
return {
+
result: {
+
hash: existingBlob.cid,
+
blobRef: existingBlob.blobRef,
+
...(file.compressed && {
+
encoding: 'gzip' as const,
+
mimeType: file.originalMimeType || file.mimeType,
+
base64: true
+
})
+
},
+
filePath: file.name,
+
sentMimeType: file.mimeType,
+
returnedMimeType: existingBlob.blobRef.mimeType,
+
reused: true
+
};
+
}
+
+
const uploadMimeType = file.compressed || file.mimeType.startsWith('text/html')
+
? 'application/octet-stream'
+
: file.mimeType;
+
+
const compressionInfo = file.compressed ? ' (gzipped)' : '';
+
const fileSizeMB = (file.size / 1024 / 1024).toFixed(2);
+
logger.info(`[File Upload] ⬆️ Uploading: ${file.name} (${fileSizeMB}MB${compressionInfo})`);
+
+
const uploadResult = await uploadBlobWithRetry(
+
agent,
+
file.content,
+
uploadMimeType,
+
file.name
+
);
+
+
const returnedBlobRef = uploadResult.data.blob;
+
updateJobProgress(jobId, { filesUploaded: (getUploadJob(jobId)?.progress.filesUploaded || 0) + 1 });
+
logger.info(`[File Upload] ✅ Uploaded: ${file.name} (CID: ${fileCID})`);
+
+
return {
+
result: {
+
hash: returnedBlobRef.ref.toString(),
+
blobRef: returnedBlobRef,
+
...(file.compressed && {
+
encoding: 'gzip' as const,
+
mimeType: file.originalMimeType || file.mimeType,
+
base64: true
+
})
+
},
+
filePath: file.name,
+
sentMimeType: file.mimeType,
+
returnedMimeType: returnedBlobRef.mimeType,
+
reused: false
+
};
+
} catch (uploadError) {
+
logger.error('Upload failed for file', uploadError);
+
throw uploadError;
+
}
+
};
+
+
// Sliding window concurrency control
+
const processWithConcurrency = async () => {
+
const results: any[] = [];
+
let fileIndex = 0;
+
const executing = new Set<Promise<void>>();
+
+
for (const file of validUploadedFiles) {
+
const currentIndex = fileIndex++;
+
+
const promise = processFile(file, currentIndex)
+
.then(result => {
+
results[currentIndex] = result;
+
})
+
.catch(error => {
+
logger.error(`Failed to process file at index ${currentIndex}`, error);
+
throw error; // Re-throw to fail the entire upload
+
})
+
.finally(() => {
+
executing.delete(promise);
+
});
+
+
executing.add(promise);
+
+
if (executing.size >= CONCURRENCY_LIMIT) {
+
await Promise.race(executing);
+
}
+
}
+
+
// Wait for remaining uploads
+
await Promise.all(executing);
+
return results.filter(r => r !== undefined); // Filter out any undefined entries
+
};
+
+
const allResults = await processWithConcurrency();
+
uploadedBlobs.push(...allResults);
+
+
const currentReused = uploadedBlobs.filter(b => b.reused).length;
+
const currentUploaded = uploadedBlobs.filter(b => !b.reused).length;
+
logger.info(`[File Upload] 🎉 Upload complete → ${uploadedBlobs.length}/${validUploadedFiles.length} files (${currentUploaded} uploaded, ${currentReused} reused)`);
+
+
const reusedCount = uploadedBlobs.filter(b => b.reused).length;
+
const uploadedCount = uploadedBlobs.filter(b => !b.reused).length;
+
logger.info(`[File Upload] 🎉 Upload phase complete! Total: ${uploadedBlobs.length} files (${uploadedCount} uploaded, ${reusedCount} reused)`);
+
+
const uploadResults: FileUploadResult[] = uploadedBlobs.map(blob => blob.result);
+
const filePaths: string[] = uploadedBlobs.map(blob => blob.filePath);
+
+
// Update directory with file blobs
+
console.log('Updating directory with blob references...');
+
updateJobProgress(jobId, { phase: 'creating_manifest' });
+
const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths);
+
+
// Check if we need to split into subfs records
+
// Split proactively if we have lots of files to avoid hitting manifest size limits
+
const MAX_MANIFEST_SIZE = 140 * 1024; // 140KB to be safe (PDS limit is 150KB)
+
const FILE_COUNT_THRESHOLD = 250; // Start splitting early
+
const subfsRecords: Array<{ uri: string; path: string }> = [];
+
let workingDirectory = updatedDirectory;
+
let currentFileCount = fileCount;
+
+
// Create initial manifest to check size
+
let manifest = createManifest(siteName, workingDirectory, fileCount);
+
let manifestSize = JSON.stringify(manifest).length;
+
+
// Split if we have lots of files OR if manifest is already too large
+
if (fileCount >= FILE_COUNT_THRESHOLD || manifestSize > MAX_MANIFEST_SIZE) {
+
console.log(`⚠️ Large site detected (${fileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
+
logger.info(`Large site with ${fileCount} files, splitting into subfs records`);
+
+
// Keep splitting until manifest fits under limit
+
let attempts = 0;
+
const MAX_ATTEMPTS = 100; // Allow many splits for very large sites
+
+
while (manifestSize > MAX_MANIFEST_SIZE && attempts < MAX_ATTEMPTS) {
+
attempts++;
+
+
// Find all directories sorted by size (largest first)
+
const directories = findLargeDirectories(workingDirectory);
+
directories.sort((a, b) => b.size - a.size);
+
+
if (directories.length === 0) {
+
// No more directories to split - this should be very rare
+
throw new Error(
+
`Cannot split manifest further - no subdirectories available. ` +
+
`Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
+
`Try organizing files into subdirectories.`
+
);
+
}
+
+
// Pick the largest directory
+
const largestDir = directories[0];
+
console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
+
+
// Create a subfs record for this directory
+
const subfsRkey = TID.nextStr();
+
const subfsManifest = {
+
$type: 'place.wisp.subfs' as const,
+
root: largestDir.directory,
+
fileCount: largestDir.fileCount,
+
createdAt: new Date().toISOString()
+
};
+
+
// Validate subfs record
+
const subfsValidation = validateSubfsRecord(subfsManifest);
+
if (!subfsValidation.success) {
+
throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
+
}
+
+
// Upload subfs record to PDS
+
const subfsRecord = await agent.com.atproto.repo.putRecord({
+
repo: did,
+
collection: 'place.wisp.subfs',
+
rkey: subfsRkey,
+
record: subfsManifest
+
});
+
+
const subfsUri = subfsRecord.data.uri;
+
subfsRecords.push({ uri: subfsUri, path: largestDir.path });
+
console.log(` ✅ Created subfs: ${subfsUri}`);
+
logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`);
+
+
// Replace directory with subfs node in the main tree
+
workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
+
+
// Recreate manifest and check new size
+
currentFileCount -= largestDir.fileCount;
+
manifest = createManifest(siteName, workingDirectory, fileCount);
+
manifestSize = JSON.stringify(manifest).length;
+
const newSizeKB = (manifestSize / 1024).toFixed(1);
+
console.log(` → Manifest now ${newSizeKB}KB with ${currentFileCount} files (${subfsRecords.length} subfs total)`);
+
+
// Check if we're under the limit now
+
if (manifestSize <= MAX_MANIFEST_SIZE) {
+
console.log(` ✅ Manifest fits! (${newSizeKB}KB < 140KB)`);
+
break;
+
}
+
}
+
+
if (manifestSize > MAX_MANIFEST_SIZE) {
+
throw new Error(
+
`Failed to fit manifest after splitting ${attempts} directories. ` +
+
`Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
+
`This should never happen - please report this issue.`
+
);
+
}
+
+
console.log(`✅ Split complete: ${subfsRecords.length} subfs records, ${currentFileCount} files in main, ${(manifestSize / 1024).toFixed(1)}KB manifest`);
+
logger.info(`Split into ${subfsRecords.length} subfs records, ${currentFileCount} files remaining in main tree`);
+
} else {
+
const manifestSizeKB = (manifestSize / 1024).toFixed(1);
+
console.log(`Manifest created (${fileCount} files, ${manifestSizeKB}KB JSON) - no splitting needed`);
+
}
+
+
const rkey = siteName;
+
updateJobProgress(jobId, { phase: 'finalizing' });
+
+
console.log('Putting record to PDS with rkey:', rkey);
+
const record = await agent.com.atproto.repo.putRecord({
+
repo: did,
+
collection: 'place.wisp.fs',
+
rkey: rkey,
+
record: manifest
+
});
+
console.log('Record successfully created on PDS:', record.data.uri);
+
+
// Store site in database cache
+
await upsertSite(did, rkey, siteName);
+
+
// Clean up old subfs records if we had any
+
if (oldSubfsUris.length > 0) {
+
console.log(`Cleaning up ${oldSubfsUris.length} old subfs records...`);
+
logger.info(`Cleaning up ${oldSubfsUris.length} old subfs records`);
+
+
// Delete old subfs records in parallel (don't wait for completion)
+
Promise.all(
+
oldSubfsUris.map(async ({ uri }) => {
+
try {
+
// Parse URI: at://did/collection/rkey
+
const parts = uri.replace('at://', '').split('/');
+
const subRkey = parts[2];
+
+
await agent.com.atproto.repo.deleteRecord({
+
repo: did,
+
collection: 'place.wisp.subfs',
+
rkey: subRkey
+
});
+
+
console.log(` 🗑️ Deleted old subfs: ${uri}`);
+
logger.info(`Deleted old subfs record: ${uri}`);
+
} catch (err: any) {
+
// Don't fail the whole upload if cleanup fails
+
console.warn(`Failed to delete old subfs ${uri}:`, err?.message);
+
logger.warn(`Failed to delete old subfs ${uri}`, err);
+
}
+
})
+
).catch(err => {
+
// Log but don't fail if cleanup fails
+
logger.warn('Some subfs cleanup operations failed', err);
+
});
+
}
+
+
completeUploadJob(jobId, {
+
success: true,
+
uri: record.data.uri,
+
cid: record.data.cid,
+
fileCount,
+
siteName,
+
skippedFiles,
+
uploadedCount: validUploadedFiles.length
+
});
+
+
console.log('=== UPLOAD FILES COMPLETE ===');
+
} catch (error) {
+
console.error('=== UPLOAD ERROR ===');
+
console.error('Error details:', error);
+
logger.error('Upload error', error);
+
failUploadJob(jobId, error instanceof Error ? error.message : 'Unknown error');
+
}
+
}
+
export const wispRoutes = (client: NodeOAuthClient, cookieSecret: string) =>
new Elysia({
prefix: '/wisp',
···
const auth = await requireAuth(client, cookie)
return { auth }
})
+
.get(
+
'/upload-progress/:jobId',
+
async ({ params: { jobId }, auth, set }) => {
+
const job = getUploadJob(jobId);
+
+
if (!job) {
+
set.status = 404;
+
return { error: 'Job not found' };
+
}
+
+
// Verify job belongs to authenticated user
+
if (job.did !== auth.did) {
+
set.status = 403;
+
return { error: 'Unauthorized' };
+
}
+
+
// Set up SSE headers
+
set.headers = {
+
'Content-Type': 'text/event-stream',
+
'Cache-Control': 'no-cache',
+
'Connection': 'keep-alive'
+
};
+
+
const stream = new ReadableStream({
+
start(controller) {
+
const encoder = new TextEncoder();
+
+
// Send initial state
+
const sendEvent = (event: string, data: any) => {
+
try {
+
const message = `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`;
+
controller.enqueue(encoder.encode(message));
+
} catch (err) {
+
// Controller closed, ignore
+
}
+
};
+
+
// Send keepalive comment every 15 seconds to prevent timeout
+
const keepaliveInterval = setInterval(() => {
+
try {
+
controller.enqueue(encoder.encode(': keepalive\n\n'));
+
} catch (err) {
+
// Controller closed, stop sending keepalives
+
clearInterval(keepaliveInterval);
+
}
+
}, 15000);
+
+
// Send current job state immediately
+
sendEvent('progress', {
+
status: job.status,
+
progress: job.progress,
+
result: job.result,
+
error: job.error
+
});
+
+
// If job is already completed or failed, close the stream
+
if (job.status === 'completed' || job.status === 'failed') {
+
clearInterval(keepaliveInterval);
+
controller.close();
+
return;
+
}
+
+
// Listen for updates
+
const cleanup = addJobListener(jobId, (event, data) => {
+
sendEvent(event, data);
+
+
// Close stream after done or error event
+
if (event === 'done' || event === 'error') {
+
clearInterval(keepaliveInterval);
+
setTimeout(() => {
+
try {
+
controller.close();
+
} catch (err) {
+
// Already closed
+
}
+
}, 100);
+
}
+
});
+
+
// Cleanup on disconnect
+
return () => {
+
clearInterval(keepaliveInterval);
+
cleanup();
+
};
+
}
+
});
+
+
return new Response(stream);
+
}
+
)
.post(
'/upload-files',
async ({ body, auth }) => {
···
const hasFiles = files && (Array.isArray(files) ? files.length > 0 : !!files);
if (!hasFiles) {
-// Create agent with OAuth session
+// Handle empty upload synchronously (fast operation)
const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
-// Create empty manifest
const emptyManifest = {
$type: 'place.wisp.fs',
site: siteName,
···
createdAt: new Date().toISOString()
};
-// Validate the manifest
const validationResult = validateRecord(emptyManifest);
if (!validationResult.success) {
throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
}
-// Use site name as rkey
const rkey = siteName;
const record = await agent.com.atproto.repo.putRecord({
···
};
}
+
// For file uploads, create a job and process in background
+
const fileArray = Array.isArray(files) ? files : [files];
+
const jobId = createUploadJob(auth.did, siteName, fileArray.length);
+
// Create agent with OAuth session
const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
console.log('Agent created for DID:', auth.did);
-
-
// Try to fetch existing record to enable incremental updates
-
let existingBlobMap = new Map<string, { blobRef: any; cid: string }>();
-
console.log('Attempting to fetch existing record...');
-
try {
-
const rkey = siteName;
-
const existingRecord = await agent.com.atproto.repo.getRecord({
-
repo: auth.did,
-
collection: 'place.wisp.fs',
-
rkey: rkey
-
});
-
console.log('Existing record found!');
-
-
if (existingRecord.data.value && typeof existingRecord.data.value === 'object' && 'root' in existingRecord.data.value) {
-
const manifest = existingRecord.data.value as any;
-
existingBlobMap = extractBlobMap(manifest.root);
-
console.log(`Found existing manifest with ${existingBlobMap.size} files for incremental update`);
-
logger.info(`Found existing manifest with ${existingBlobMap.size} files for incremental update`);
-
}
-
} catch (error: any) {
-
console.log('No existing record found or error:', error?.message || error);
-
// Record doesn't exist yet, this is a new site
-
if (error?.status !== 400 && error?.error !== 'RecordNotFound') {
-
logger.warn('Failed to fetch existing record, proceeding with full upload', error);
-
}
-
}
-
-
// Convert File objects to UploadedFile format
-
// Elysia gives us File objects directly, handle both single file and array
-
const fileArray = Array.isArray(files) ? files : [files];
-
const uploadedFiles: UploadedFile[] = [];
-
const skippedFiles: Array<{ name: string; reason: string }> = [];
-
-
console.log('Processing files, count:', fileArray.length);
-
-
for (let i = 0; i < fileArray.length; i++) {
-
const file = fileArray[i];
-
console.log(`Processing file ${i + 1}/${fileArray.length}:`, file.name, file.size, 'bytes');
-
-
// Skip files that are too large (limit to 100MB per file)
-
const maxSize = MAX_FILE_SIZE; // 100MB
-
if (file.size > maxSize) {
-
skippedFiles.push({
-
name: file.name,
-
reason: `file too large (${(file.size / 1024 / 1024).toFixed(2)}MB, max 100MB)`
-
});
-
continue;
-
}
-
-
const arrayBuffer = await file.arrayBuffer();
-
const originalContent = Buffer.from(arrayBuffer);
-
const originalMimeType = file.type || 'application/octet-stream';
-
-
// Compress and base64 encode ALL files
-
const compressedContent = compressFile(originalContent);
-
// Base64 encode the gzipped content to prevent PDS content sniffing
-
// Convert base64 string to bytes using binary encoding (each char becomes exactly one byte)
-
// This is what PDS receives and computes CID on
-
const base64Content = Buffer.from(compressedContent.toString('base64'), 'binary');
-
const compressionRatio = (compressedContent.length / originalContent.length * 100).toFixed(1);
-
console.log(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${base64Content.length} bytes`);
-
logger.info(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${base64Content.length} bytes`);
-
-
uploadedFiles.push({
-
name: file.name,
-
content: base64Content, // This is the gzipped+base64 content that will be uploaded and CID-computed
-
mimeType: originalMimeType,
-
size: base64Content.length,
-
compressed: true,
-
originalMimeType
-
});
-
}
-
-
// Check total size limit (300MB)
-
const totalSize = uploadedFiles.reduce((sum, file) => sum + file.size, 0);
-
const maxTotalSize = MAX_SITE_SIZE; // 300MB
-
-
if (totalSize > maxTotalSize) {
-
throw new Error(`Total upload size ${(totalSize / 1024 / 1024).toFixed(2)}MB exceeds 300MB limit`);
-
}
-
-
// Check file count limit (2000 files)
-
if (uploadedFiles.length > MAX_FILE_COUNT) {
-
throw new Error(`File count ${uploadedFiles.length} exceeds ${MAX_FILE_COUNT} files limit`);
-
}
-
-
if (uploadedFiles.length === 0) {
-
-
// Create empty manifest
-
const emptyManifest = {
-
$type: 'place.wisp.fs',
-
site: siteName,
-
root: {
-
type: 'directory',
-
entries: []
-
},
-
fileCount: 0,
-
createdAt: new Date().toISOString()
-
};
-
-
// Validate the manifest
-
const validationResult = validateRecord(emptyManifest);
-
if (!validationResult.success) {
-
throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
-
}
+
console.log('Created upload job:', jobId);
-
// Use site name as rkey
-
const rkey = siteName;
-
-
const record = await agent.com.atproto.repo.putRecord({
-
repo: auth.did,
-
collection: 'place.wisp.fs',
-
rkey: rkey,
-
record: emptyManifest
-
});
-
-
await upsertSite(auth.did, rkey, siteName);
-
-
return {
-
success: true,
-
uri: record.data.uri,
-
cid: record.data.cid,
-
fileCount: 0,
-
siteName,
-
skippedFiles,
-
message: 'Site created but no valid web files were found to upload'
-
};
-
}
-
-
// Process files into directory structure
-
console.log('Processing uploaded files into directory structure...');
-
console.log('uploadedFiles array length:', uploadedFiles.length);
-
console.log('uploadedFiles contents:', uploadedFiles.map((f, i) => `${i}: ${f?.name || 'UNDEFINED'}`));
-
-
// Filter out any undefined/null/invalid entries (defensive)
-
const validUploadedFiles = uploadedFiles.filter((f, i) => {
-
if (!f) {
-
console.error(`Filtering out undefined/null file at index ${i}`);
-
return false;
-
}
-
if (!f.name) {
-
console.error(`Filtering out file with no name at index ${i}:`, f);
-
return false;
-
}
-
if (!f.content) {
-
console.error(`Filtering out file with no content at index ${i}:`, f.name);
-
return false;
-
}
-
return true;
+
// Start background processing (don't await)
+
processUploadInBackground(jobId, agent, auth.did, siteName, fileArray).catch(err => {
+
console.error('Background upload process failed:', err);
+
logger.error('Background upload process failed', err);
});
-
if (validUploadedFiles.length !== uploadedFiles.length) {
-
console.warn(`Filtered out ${uploadedFiles.length - validUploadedFiles.length} invalid files`);
-
}
-
console.log('validUploadedFiles length:', validUploadedFiles.length);
-
const { directory, fileCount } = processUploadedFiles(validUploadedFiles);
-
console.log('Directory structure created, file count:', fileCount);
-
-
// Upload files as blobs in parallel (or reuse existing blobs with matching CIDs)
-
console.log('Starting blob upload/reuse phase...');
-
// For compressed files, we upload as octet-stream and store the original MIME type in metadata
-
// For text/html files, we also use octet-stream as a workaround for PDS image pipeline issues
-
const uploadPromises = validUploadedFiles.map(async (file, i) => {
-
try {
-
// Skip undefined files (shouldn't happen after filter, but defensive)
-
if (!file || !file.name) {
-
console.error(`ERROR: Undefined file at index ${i} in validUploadedFiles!`);
-
throw new Error(`Undefined file at index ${i}`);
-
}
-
-
// Compute CID for this file to check if it already exists
-
// Note: file.content is already gzipped+base64 encoded
-
const fileCID = computeCID(file.content);
-
-
// Normalize the file path for comparison (remove base folder prefix like "cobblemon/")
-
const normalizedPath = file.name.replace(/^[^\/]*\//, '');
-
-
// Check if we have an existing blob with the same CID
-
// Try both the normalized path and the full path
-
const existingBlob = existingBlobMap.get(normalizedPath) || existingBlobMap.get(file.name);
-
-
if (existingBlob && existingBlob.cid === fileCID) {
-
// Reuse existing blob - no need to upload
-
logger.info(`[File Upload] Reusing existing blob for: ${file.name} (CID: ${fileCID})`);
-
-
return {
-
result: {
-
hash: existingBlob.cid,
-
blobRef: existingBlob.blobRef,
-
...(file.compressed && {
-
encoding: 'gzip' as const,
-
mimeType: file.originalMimeType || file.mimeType,
-
base64: true
-
})
-
},
-
filePath: file.name,
-
sentMimeType: file.mimeType,
-
returnedMimeType: existingBlob.blobRef.mimeType,
-
reused: true
-
};
-
}
-
-
// File is new or changed - upload it
-
// If compressed, always upload as octet-stream
-
// Otherwise, workaround: PDS incorrectly processes text/html through image pipeline
-
const uploadMimeType = file.compressed || file.mimeType.startsWith('text/html')
-
? 'application/octet-stream'
-
: file.mimeType;
-
-
const compressionInfo = file.compressed ? ' (gzipped)' : '';
-
logger.info(`[File Upload] Uploading new/changed file: ${file.name} (original: ${file.mimeType}, sending as: ${uploadMimeType}, ${file.size} bytes${compressionInfo}, CID: ${fileCID})`);
-
-
const uploadResult = await agent.com.atproto.repo.uploadBlob(
-
file.content,
-
{
-
encoding: uploadMimeType
-
}
-
);
-
-
const returnedBlobRef = uploadResult.data.blob;
-
-
// Use the blob ref exactly as returned from PDS
-
return {
-
result: {
-
hash: returnedBlobRef.ref.toString(),
-
blobRef: returnedBlobRef,
-
...(file.compressed && {
-
encoding: 'gzip' as const,
-
mimeType: file.originalMimeType || file.mimeType,
-
base64: true
-
})
-
},
-
filePath: file.name,
-
sentMimeType: file.mimeType,
-
returnedMimeType: returnedBlobRef.mimeType,
-
reused: false
-
};
-
} catch (uploadError) {
-
logger.error('Upload failed for file', uploadError);
-
throw uploadError;
-
}
-
});
-
-
// Wait for all uploads to complete
-
const uploadedBlobs = await Promise.all(uploadPromises);
-
-
// Count reused vs uploaded blobs
-
const reusedCount = uploadedBlobs.filter(b => (b as any).reused).length;
-
const uploadedCount = uploadedBlobs.filter(b => !(b as any).reused).length;
-
console.log(`Blob statistics: ${reusedCount} reused, ${uploadedCount} uploaded, ${uploadedBlobs.length} total`);
-
logger.info(`Blob statistics: ${reusedCount} reused, ${uploadedCount} uploaded, ${uploadedBlobs.length} total`);
-
-
// Extract results and file paths in correct order
-
const uploadResults: FileUploadResult[] = uploadedBlobs.map(blob => blob.result);
-
const filePaths: string[] = uploadedBlobs.map(blob => blob.filePath);
-
-
// Update directory with file blobs
-
console.log('Updating directory with blob references...');
-
const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths);
-
-
// Create manifest
-
console.log('Creating manifest...');
-
const manifest = createManifest(siteName, updatedDirectory, fileCount);
-
console.log('Manifest created successfully');
-
-
// Use site name as rkey
-
const rkey = siteName;
-
-
let record;
-
try {
-
console.log('Putting record to PDS with rkey:', rkey);
-
record = await agent.com.atproto.repo.putRecord({
-
repo: auth.did,
-
collection: 'place.wisp.fs',
-
rkey: rkey,
-
record: manifest
-
});
-
console.log('Record successfully created on PDS:', record.data.uri);
-
} catch (putRecordError: any) {
-
console.error('FAILED to create record on PDS:', putRecordError);
-
logger.error('Failed to create record on PDS', putRecordError);
-
-
throw putRecordError;
-
}
-
-
// Store site in database cache
-
await upsertSite(auth.did, rkey, siteName);
-
-
const result = {
+
// Return immediately with job ID
+
return {
success: true,
-
uri: record.data.uri,
-
cid: record.data.cid,
-
fileCount,
-
siteName,
-
skippedFiles,
-
uploadedCount: validUploadedFiles.length
+
jobId,
+
message: 'Upload started. Connect to /wisp/upload-progress/' + jobId + ' for progress updates.'
};
-
-
console.log('=== UPLOAD FILES COMPLETE ===');
-
return result;
} catch (error) {
console.error('=== UPLOAD ERROR ===');
console.error('Error details:', error);
-
console.error('Stack trace:', error instanceof Error ? error.stack : 'N/A');
-
logger.error('Upload error', error, {
-
message: error instanceof Error ? error.message : 'Unknown error',
-
name: error instanceof Error ? error.name : undefined
-
});
+
logger.error('Upload error', error);
throw new Error(`Failed to upload files: ${error instanceof Error ? error.message : 'Unknown error'}`);
}
}