···
import { Agent } from '@atproto/api'
import { TID } from '@atproto/common-web'
  8 -   type FileUploadResult,
  9 -   processUploadedFiles,
 17 -   findLargeDirectories,
 18 -   replaceDirectoryWithSubfs,
 19 -   estimateDirectorySize
  8 +   type FileUploadResult,
  9 +   processUploadedFiles,
 17 +   findLargeDirectories,
 18 +   replaceDirectoryWithSubfs,
 19 +   estimateDirectorySize
} from '../lib/wisp-utils'
import { upsertSite } from '../lib/db'
import { logger } from '../lib/observability'
···
import { validateRecord as validateSubfsRecord } from '../lexicons/types/place/wisp/subfs'
import { MAX_SITE_SIZE, MAX_FILE_SIZE, MAX_FILE_COUNT } from '../lib/constants'
} from '../lib/upload-jobs'
function isValidSiteName(siteName: string): boolean {
 36 -   if (!siteName || typeof siteName !== 'string') return false;
 36 +   if (!siteName || typeof siteName !== 'string') return false;
 38 -   // Length check (AT Protocol rkey limit)
 39 -   if (siteName.length < 1 || siteName.length > 512) return false;
 38 +   // Length check (AT Protocol rkey limit)
 39 +   if (siteName.length < 1 || siteName.length > 512) return false;
 41 -   // Check for path traversal
 42 -   if (siteName === '.' || siteName === '..') return false;
 43 -   if (siteName.includes('/') || siteName.includes('\\')) return false;
 44 -   if (siteName.includes('\0')) return false;
 41 +   // Check for path traversal
 42 +   if (siteName === '.' || siteName === '..') return false;
 43 +   if (siteName.includes('/') || siteName.includes('\\')) return false;
 44 +   if (siteName.includes('\0')) return false;
 46 -   // AT Protocol rkey format: alphanumeric, dots, dashes, underscores, tildes, colons
 47 -   // Based on NSID format rules
 48 -   const validRkeyPattern = /^[a-zA-Z0-9._~:-]+$/;
 49 -   if (!validRkeyPattern.test(siteName)) return false;
 46 +   // AT Protocol rkey format: alphanumeric, dots, dashes, underscores, tildes, colons
 47 +   // Based on NSID format rules
 48 +   const validRkeyPattern = /^[a-zA-Z0-9._~:-]+$/;
 49 +   if (!validRkeyPattern.test(siteName)) return false;
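
A few concrete values and how the validator above treats them (a quick sketch using the function as written; not part of the diff):

// isValidSiteName examples (sketch)
isValidSiteName('my-site')          // true  - plain alphanumeric with a dash
isValidSiteName('blog.v2~draft:1')  // true  - dots, tildes, colons are allowed rkey chars
isValidSiteName('..')               // false - path traversal guard
isValidSiteName('a/b')              // false - slashes rejected
isValidSiteName('x'.repeat(513))    // false - over the 512-char rkey limit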
async function processUploadInBackground(
 62 -   // Try to fetch existing record to enable incremental updates
 63 -   let existingBlobMap = new Map<string, { blobRef: any; cid: string }>();
 64 -   let oldSubfsUris: Array<{ uri: string; path: string }> = [];
 65 -   console.log('Attempting to fetch existing record...');
 66 -   updateJobProgress(jobId, { phase: 'validating' });
 62 +   // Try to fetch existing record to enable incremental updates
 63 +   let existingBlobMap = new Map<string, { blobRef: any; cid: string }>();
 64 +   let oldSubfsUris: Array<{ uri: string; path: string }> = [];
 65 +   console.log('Attempting to fetch existing record...');
 66 +   updateJobProgress(jobId, { phase: 'validating' });
 69 -     const rkey = siteName;
 70 -     const existingRecord = await agent.com.atproto.repo.getRecord({
 72 -       collection: 'place.wisp.fs',
 75 -     console.log('Existing record found!');
 69 +     const rkey = siteName;
 70 +     const existingRecord = await agent.com.atproto.repo.getRecord({
 72 +       collection: 'place.wisp.fs',
 75 +     console.log('Existing record found!');
 77 -     if (existingRecord.data.value && typeof existingRecord.data.value === 'object' && 'root' in existingRecord.data.value) {
 78 -       const manifest = existingRecord.data.value as any;
 77 +     if (existingRecord.data.value && typeof existingRecord.data.value === 'object' && 'root' in existingRecord.data.value) {
 78 +       const manifest = existingRecord.data.value as any;
 80 -       // Extract blob map from main record
 81 -       existingBlobMap = extractBlobMap(manifest.root);
 82 -       console.log(`Found existing manifest with ${existingBlobMap.size} files in main record`);
 80 +       // Extract blob map from main record
 81 +       existingBlobMap = extractBlobMap(manifest.root);
 82 +       console.log(`Found existing manifest with ${existingBlobMap.size} files in main record`);
 84 -       // Extract subfs URIs with their mount paths from main record
 85 -       const subfsUris = extractSubfsUris(manifest.root);
 86 -       oldSubfsUris = subfsUris; // Save for cleanup later
 84 +       // Extract subfs URIs with their mount paths from main record
 85 +       const subfsUris = extractSubfsUris(manifest.root);
 86 +       oldSubfsUris = subfsUris; // Save for cleanup later
 88 -       if (subfsUris.length > 0) {
 89 -         console.log(`Found ${subfsUris.length} subfs records, fetching in parallel...`);
 90 -         logger.info(`Fetching ${subfsUris.length} subfs records for blob reuse`);
 88 +       if (subfsUris.length > 0) {
 89 +         console.log(`Found ${subfsUris.length} subfs records, fetching in parallel...`);
 90 +         logger.info(`Fetching ${subfsUris.length} subfs records for blob reuse`);
 92 -         // Fetch all subfs records in parallel
 93 -         const subfsRecords = await Promise.all(
 94 -           subfsUris.map(async ({ uri, path }) => {
 96 -             // Parse URI: at://did/collection/rkey
 97 -             const parts = uri.replace('at://', '').split('/');
 98 -             const subDid = parts[0];
 99 -             const collection = parts[1];
100 -             const subRkey = parts[2];
 92 +         // Fetch all subfs records in parallel
 93 +         const subfsRecords = await Promise.all(
 94 +           subfsUris.map(async ({ uri, path }) => {
 96 +             // Parse URI: at://did/collection/rkey
 97 +             const parts = uri.replace('at://', '').split('/');
 98 +             const subDid = parts[0];
 99 +             const collection = parts[1];
100 +             const subRkey = parts[2];
102 -             const record = await agent.com.atproto.repo.getRecord({
104 -               collection: collection,
102 +             const record = await agent.com.atproto.repo.getRecord({
104 +               collection: collection,
108 -             return { record: record.data.value as any, mountPath: path };
109 -           } catch (err: any) {
110 -             logger.warn(`Failed to fetch subfs record ${uri}: ${err?.message}`, err);
108 +             return { record: record.data.value as any, mountPath: path };
109 +           } catch (err: any) {
110 +             logger.warn(`Failed to fetch subfs record ${uri}: ${err?.message}`, err);
116 -         // Merge blob maps from all subfs records
117 -         let totalSubfsBlobs = 0;
118 -         for (const subfsData of subfsRecords) {
119 -           if (subfsData && subfsData.record && 'root' in subfsData.record) {
120 -             // Extract blobs with the correct mount path prefix
121 -             const subfsMap = extractBlobMap(subfsData.record.root, subfsData.mountPath);
122 -             subfsMap.forEach((value, key) => {
123 -               existingBlobMap.set(key, value);
116 +         // Merge blob maps from all subfs records
117 +         let totalSubfsBlobs = 0;
118 +         for (const subfsData of subfsRecords) {
119 +           if (subfsData && subfsData.record && 'root' in subfsData.record) {
120 +             // Extract blobs with the correct mount path prefix
121 +             const subfsMap = extractBlobMap(subfsData.record.root, subfsData.mountPath);
122 +             subfsMap.forEach((value, key) => {
123 +               existingBlobMap.set(key, value);
129 -         console.log(`Merged ${totalSubfsBlobs} files from ${subfsUris.length} subfs records`);
130 -         logger.info(`Total blob map: ${existingBlobMap.size} files (main + subfs)`);
129 +         console.log(`Merged ${totalSubfsBlobs} files from ${subfsUris.length} subfs records`);
130 +         logger.info(`Total blob map: ${existingBlobMap.size} files (main + subfs)`);
133 -       console.log(`Total existing blobs for reuse: ${existingBlobMap.size} files`);
134 -       logger.info(`Found existing manifest with ${existingBlobMap.size} files for incremental update`);
136 -   } catch (error: any) {
137 -     console.log('No existing record found or error:', error?.message || error);
138 -     if (error?.status !== 400 && error?.error !== 'RecordNotFound') {
139 -       logger.warn('Failed to fetch existing record, proceeding with full upload', error);
133 +       console.log(`Total existing blobs for reuse: ${existingBlobMap.size} files`);
134 +       logger.info(`Found existing manifest with ${existingBlobMap.size} files for incremental update`);
136 +   } catch (error: any) {
137 +     console.log('No existing record found or error:', error?.message || error);
138 +     if (error?.status !== 400 && error?.error !== 'RecordNotFound') {
139 +       logger.warn('Failed to fetch existing record, proceeding with full upload', error);
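
The subfs fetch above splits at://did/collection/rkey strings by hand. A minimal standalone helper doing the same parse (hypothetical name, shown only to make the shape explicit):

// Hypothetical helper mirroring the inline parse above.
// 'at://did:plc:abc/place.wisp.subfs/3kxyz' -> { did, collection, rkey }
function parseAtUri(uri: string): { did: string; collection: string; rkey: string } {
  const [did, collection, rkey] = uri.replace('at://', '').split('/');
  if (!did || !collection || !rkey) throw new Error(`Malformed AT URI: ${uri}`);
  return { did, collection, rkey };
}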
143 -   // Convert File objects to UploadedFile format
144 -   const uploadedFiles: UploadedFile[] = [];
145 -   const skippedFiles: Array<{ name: string; reason: string }> = [];
143 +   // Convert File objects to UploadedFile format
144 +   const uploadedFiles: UploadedFile[] = [];
145 +   const skippedFiles: Array<{ name: string; reason: string }> = [];
147 -   console.log('Processing files, count:', fileArray.length);
148 -   updateJobProgress(jobId, { phase: 'compressing' });
147 +   console.log('Processing files, count:', fileArray.length);
148 +   updateJobProgress(jobId, { phase: 'compressing' });
150 -   for (let i = 0; i < fileArray.length; i++) {
151 -     const file = fileArray[i];
150 +   for (let i = 0; i < fileArray.length; i++) {
151 +     const file = fileArray[i];
153 -     // Skip undefined/null files
154 -     if (!file || !file.name) {
155 -       console.log(`Skipping undefined file at index ${i}`);
156 -       skippedFiles.push({
157 -         name: `[undefined file at index ${i}]`,
158 -         reason: 'Invalid file object'
153 +     // Skip undefined/null files
154 +     if (!file || !file.name) {
155 +       console.log(`Skipping undefined file at index ${i}`);
156 +       skippedFiles.push({
157 +         name: `[undefined file at index ${i}]`,
158 +         reason: 'Invalid file object'
163 -     console.log(`Processing file ${i + 1}/${fileArray.length}:`, file.name, file.size, 'bytes');
164 -     updateJobProgress(jobId, {
165 -       filesProcessed: i + 1,
166 -       currentFile: file.name
163 +     console.log(`Processing file ${i + 1}/${fileArray.length}:`, file.name, file.size, 'bytes');
164 +     updateJobProgress(jobId, {
165 +       filesProcessed: i + 1,
166 +       currentFile: file.name
169 -     // Skip .git directory files
170 -     const normalizedPath = file.name.replace(/^[^\/]*\//, '');
171 -     if (normalizedPath.startsWith('.git/') || normalizedPath === '.git') {
172 -       console.log(`Skipping .git file: ${file.name}`);
173 -       skippedFiles.push({
175 -         reason: '.git directory excluded'
169 +     // Skip .git directory files
170 +     const normalizedPath = file.name.replace(/^[^\/]*\//, '');
171 +     if (normalizedPath.startsWith('.git/') || normalizedPath === '.git') {
172 +       console.log(`Skipping .git file: ${file.name}`);
173 +       skippedFiles.push({
175 +         reason: '.git directory excluded'
180 -     // Skip files that are too large
181 -     const maxSize = MAX_FILE_SIZE;
182 -     if (file.size > maxSize) {
183 -       skippedFiles.push({
185 -         reason: `file too large (${(file.size / 1024 / 1024).toFixed(2)}MB, max 100MB)`
180 +     // Skip files that are too large
181 +     const maxSize = MAX_FILE_SIZE;
182 +     if (file.size > maxSize) {
183 +       skippedFiles.push({
185 +         reason: `file too large (${(file.size / 1024 / 1024).toFixed(2)}MB, max 100MB)`
190 -     const arrayBuffer = await file.arrayBuffer();
191 -     const originalContent = Buffer.from(arrayBuffer);
192 -     const originalMimeType = file.type || 'application/octet-stream';
190 +     const arrayBuffer = await file.arrayBuffer();
191 +     const originalContent = Buffer.from(arrayBuffer);
192 +     const originalMimeType = file.type || 'application/octet-stream';
194 -     // Determine if file should be compressed
195 -     const shouldCompress = shouldCompressFile(originalMimeType);
194 +     // Determine if file should be compressed
195 +     const shouldCompress = shouldCompressFile(originalMimeType);
197 -     // Text files (HTML/CSS/JS) need base64 encoding to prevent PDS content sniffing
198 -     // Audio files just need compression without base64
199 -     const needsBase64 = originalMimeType.startsWith('text/') ||
200 -       originalMimeType.includes('html') ||
201 -       originalMimeType.includes('javascript') ||
202 -       originalMimeType.includes('css') ||
203 -       originalMimeType.includes('json') ||
204 -       originalMimeType.includes('xml') ||
205 -       originalMimeType.includes('svg');
197 +     // Text files (HTML/CSS/JS) need base64 encoding to prevent PDS content sniffing
198 +     // Audio files just need compression without base64
199 +     const needsBase64 =
200 +       originalMimeType.startsWith('text/') ||
201 +       originalMimeType.startsWith('application/json') ||
202 +       originalMimeType.startsWith('application/xml') ||
203 +       originalMimeType === 'image/svg+xml';
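
Note the tightened predicate: the old version substring-matched ('html', 'xml', 'svg', ...), which also caught types like application/vnd.something+xml, while the new one uses prefix and exact matches. One visible consequence: application/javascript no longer qualifies (text/javascript still does, via the text/ prefix). A standalone restatement with a couple of checks (sketch, not part of the diff):

// Sketch of the new classification.
function needsBase64(mime: string): boolean {
  return (
    mime.startsWith('text/') ||            // text/html, text/css, text/javascript, ...
    mime.startsWith('application/json') ||
    mime.startsWith('application/xml') ||
    mime === 'image/svg+xml'
  );
}
console.assert(needsBase64('text/html'));
console.assert(needsBase64('image/svg+xml'));
console.assert(!needsBase64('application/vnd.fake+xml')); // matched the old includes('xml')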
207 -     let finalContent: Buffer;
208 -     let compressed = false;
209 -     let base64Encoded = false;
205 +     let finalContent: Buffer;
206 +     let compressed = false;
207 +     let base64Encoded = false;
211 -     if (shouldCompress) {
212 -       const compressedContent = compressFile(originalContent);
209 +     if (shouldCompress) {
210 +       const compressedContent = compressFile(originalContent);
216 -         // Text files: compress AND base64 encode
217 -         finalContent = Buffer.from(compressedContent.toString('base64'), 'binary');
218 -         base64Encoded = true;
219 -         const compressionRatio = (compressedContent.length / originalContent.length * 100).toFixed(1);
220 -         console.log(`Compressing+base64 ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${finalContent.length} bytes`);
221 -         logger.info(`Compressing+base64 ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${finalContent.length} bytes`);
223 -         // Audio files: just compress, no base64
224 -         finalContent = compressedContent;
225 -         const compressionRatio = (compressedContent.length / originalContent.length * 100).toFixed(1);
226 -         console.log(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%)`);
227 -         logger.info(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%)`);
230 -       // Binary files: upload directly
231 -       finalContent = originalContent;
232 -       console.log(`Uploading ${file.name} directly: ${originalContent.length} bytes (no compression)`);
233 -       logger.info(`Uploading ${file.name} directly: ${originalContent.length} bytes (binary)`);
214 +         // Text files: compress AND base64 encode
215 +         finalContent = Buffer.from(compressedContent.toString('base64'), 'binary');
216 +         base64Encoded = true;
217 +         const compressionRatio = (compressedContent.length / originalContent.length * 100).toFixed(1);
218 +         console.log(`Compressing+base64 ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${finalContent.length} bytes`);
219 +         logger.info(`Compressing+base64 ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%), base64: ${finalContent.length} bytes`);
221 +         // Audio files: just compress, no base64
222 +         finalContent = compressedContent;
223 +         const compressionRatio = (compressedContent.length / originalContent.length * 100).toFixed(1);
224 +         console.log(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%)`);
225 +         logger.info(`Compressing ${file.name}: ${originalContent.length} -> ${compressedContent.length} bytes (${compressionRatio}%)`);
228 +       // Binary files: upload directly
229 +       finalContent = originalContent;
230 +       console.log(`Uploading ${file.name} directly: ${originalContent.length} bytes (no compression)`);
231 +       logger.info(`Uploading ${file.name} directly: ${originalContent.length} bytes (binary)`);
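
Assuming compressFile is a gzip wrapper (its implementation is not shown in this diff), the text-file branch above is equivalent to this minimal sketch:

import { gzipSync } from 'node:zlib';

// Sketch of the compress-then-base64 path, assuming gzip.
// The blob payload is the base64 text itself, so the PDS cannot
// content-sniff the underlying HTML/CSS/JS.
function compressAndEncode(original: Buffer): Buffer {
  const gz = gzipSync(original);
  return Buffer.from(gz.toString('base64'));
}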
236 -     uploadedFiles.push({
238 -       content: finalContent,
239 -       mimeType: originalMimeType,
240 -       size: finalContent.length,
234 +     uploadedFiles.push({
236 +       content: finalContent,
237 +       mimeType: originalMimeType,
238 +       size: finalContent.length,
247 -   // Update total file count after filtering (important for progress tracking)
248 -   updateJobProgress(jobId, {
249 -     totalFiles: uploadedFiles.length
245 +   // Update total file count after filtering (important for progress tracking)
246 +   updateJobProgress(jobId, {
247 +     totalFiles: uploadedFiles.length
252 -   // Check total size limit
253 -   const totalSize = uploadedFiles.reduce((sum, file) => sum + file.size, 0);
254 -   const maxTotalSize = MAX_SITE_SIZE;
250 +   // Check total size limit
251 +   const totalSize = uploadedFiles.reduce((sum, file) => sum + file.size, 0);
252 +   const maxTotalSize = MAX_SITE_SIZE;
256 -   if (totalSize > maxTotalSize) {
257 -     throw new Error(`Total upload size ${(totalSize / 1024 / 1024).toFixed(2)}MB exceeds 300MB limit`);
254 +   if (totalSize > maxTotalSize) {
255 +     throw new Error(`Total upload size ${(totalSize / 1024 / 1024).toFixed(2)}MB exceeds 300MB limit`);
260 -   // Check file count limit
261 -   if (uploadedFiles.length > MAX_FILE_COUNT) {
262 -     throw new Error(`File count ${uploadedFiles.length} exceeds ${MAX_FILE_COUNT} files limit`);
258 +   // Check file count limit
259 +   if (uploadedFiles.length > MAX_FILE_COUNT) {
260 +     throw new Error(`File count ${uploadedFiles.length} exceeds ${MAX_FILE_COUNT} files limit`);
265 -   console.log(`After filtering: ${uploadedFiles.length} files to process (${skippedFiles.length} skipped)`);
263 +   console.log(`After filtering: ${uploadedFiles.length} files to process (${skippedFiles.length} skipped)`);
267 -   if (uploadedFiles.length === 0) {
268 -     // Create empty manifest
269 -     const emptyManifest = {
270 -       $type: 'place.wisp.fs',
277 -       createdAt: new Date().toISOString()
265 +   if (uploadedFiles.length === 0) {
266 +     // Create empty manifest
267 +     const emptyManifest = {
268 +       $type: 'place.wisp.fs',
275 +       createdAt: new Date().toISOString()
280 -     const validationResult = validateRecord(emptyManifest);
281 -     if (!validationResult.success) {
282 -       throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
278 +     const validationResult = validateRecord(emptyManifest);
279 +     if (!validationResult.success) {
280 +       throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
285 -     const rkey = siteName;
286 -     updateJobProgress(jobId, { phase: 'finalizing' });
283 +     const rkey = siteName;
284 +     updateJobProgress(jobId, { phase: 'finalizing' });
288 -     const record = await agent.com.atproto.repo.putRecord({
290 -       collection: 'place.wisp.fs',
292 -       record: emptyManifest
286 +     const record = await agent.com.atproto.repo.putRecord({
288 +       collection: 'place.wisp.fs',
290 +       record: emptyManifest
295 -     await upsertSite(did, rkey, siteName);
293 +     await upsertSite(did, rkey, siteName);
297 -     completeUploadJob(jobId, {
299 -       uri: record.data.uri,
300 -       cid: record.data.cid,
295 +     completeUploadJob(jobId, {
297 +       uri: record.data.uri,
298 +       cid: record.data.cid,
308 -   // Process files into directory structure
309 -   console.log('Processing uploaded files into directory structure...');
310 -   const validUploadedFiles = uploadedFiles.filter((f, i) => {
311 -     if (!f || !f.name || !f.content) {
312 -       console.error(`Filtering out invalid file at index ${i}`);
306 +   // Process files into directory structure
307 +   console.log('Processing uploaded files into directory structure...');
308 +   const validUploadedFiles = uploadedFiles.filter((f, i) => {
309 +     if (!f || !f.name || !f.content) {
310 +       console.error(`Filtering out invalid file at index ${i}`);
318 -   const { directory, fileCount } = processUploadedFiles(validUploadedFiles);
319 -   console.log('Directory structure created, file count:', fileCount);
316 +   const { directory, fileCount } = processUploadedFiles(validUploadedFiles);
317 +   console.log('Directory structure created, file count:', fileCount);
321 -   // Upload files as blobs with retry logic for DPoP nonce conflicts
322 -   console.log('Starting blob upload/reuse phase...');
323 -   updateJobProgress(jobId, { phase: 'uploading' });
319 +   // Upload files as blobs with retry logic for DPoP nonce conflicts
320 +   console.log('Starting blob upload/reuse phase...');
321 +   updateJobProgress(jobId, { phase: 'uploading' });
325 -   // Helper function to upload blob with exponential backoff retry and timeout
326 -   const uploadBlobWithRetry = async (
333 -     for (let attempt = 0; attempt < maxRetries; attempt++) {
335 -       console.log(`[File Upload] Starting upload attempt ${attempt + 1}/${maxRetries} for ${fileName} (${content.length} bytes, ${mimeType})`);
323 +   // Helper function to upload blob with exponential backoff retry and timeout
324 +   const uploadBlobWithRetry = async (
331 +     for (let attempt = 0; attempt < maxRetries; attempt++) {
332 +       const controller = new AbortController();
333 +       const timeoutMs = 300000; // 5 minute timeout per upload
334 +       const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
337 -         // Add timeout wrapper to prevent hanging requests
338 -         const uploadPromise = agent.com.atproto.repo.uploadBlob(content, { encoding: mimeType });
339 -         const timeoutMs = 300000; // 5 minute timeout per upload
337 +         console.log(`[File Upload] Starting upload attempt ${attempt + 1}/${maxRetries} for ${fileName} (${content.length} bytes, ${mimeType})`);
341 -         const timeoutPromise = new Promise((_, reject) => {
342 -           setTimeout(() => reject(new Error('Upload timeout')), timeoutMs);
339 +         const result = await agent.com.atproto.repo.uploadBlob(content, { encoding: mimeType });
340 +         clearTimeout(timeoutId);
341 +         console.log(`[File Upload] ✅ Successfully uploaded ${fileName} on attempt ${attempt + 1}`);
343 +       } catch (error: any) {
344 +         clearTimeout(timeoutId);
345 -         const result = await Promise.race([uploadPromise, timeoutPromise]) as any;
346 -         console.log(`[File Upload] ✅ Successfully uploaded ${fileName} on attempt ${attempt + 1}`);
348 -       } catch (error: any) {
349 -         const isDPoPNonceError =
350 -           error?.message?.toLowerCase().includes('nonce') ||
351 -           error?.message?.toLowerCase().includes('dpop') ||
352 -           error?.status === 409;
346 +         const isDPoPNonceError =
347 +           error?.message?.toLowerCase().includes('nonce') ||
348 +           error?.message?.toLowerCase().includes('dpop') ||
349 +           error?.status === 409;
354 -         const isTimeout = error?.message === 'Upload timeout';
355 -         const isRateLimited = error?.status === 429 || error?.message?.toLowerCase().includes('rate');
351 +         const isTimeout = error?.name === 'AbortError' || error?.message === 'Upload timeout';
352 +         const isRateLimited = error?.status === 429 || error?.message?.toLowerCase().includes('rate');
357 -         // Retry on DPoP nonce conflicts, timeouts, or rate limits
358 -         if ((isDPoPNonceError || isTimeout || isRateLimited) && attempt < maxRetries - 1) {
359 -           let backoffMs: number;
360 -           if (isRateLimited) {
361 -             backoffMs = 2000 * Math.pow(2, attempt); // 2s, 4s, 8s, 16s for rate limits
362 -           } else if (isTimeout) {
363 -             backoffMs = 1000 * Math.pow(2, attempt); // 1s, 2s, 4s, 8s for timeouts
365 -             backoffMs = 100 * Math.pow(2, attempt); // 100ms, 200ms, 400ms for DPoP
354 +         // Retry on DPoP nonce conflicts, timeouts, or rate limits
355 +         if ((isDPoPNonceError || isTimeout || isRateLimited) && attempt < maxRetries - 1) {
356 +           let backoffMs: number;
357 +           if (isRateLimited) {
358 +             backoffMs = 2000 * Math.pow(2, attempt); // 2s, 4s, 8s, 16s for rate limits
359 +           } else if (isTimeout) {
360 +             backoffMs = 1000 * Math.pow(2, attempt); // 1s, 2s, 4s, 8s for timeouts
362 +             backoffMs = 100 * Math.pow(2, attempt); // 100ms, 200ms, 400ms for DPoP
368 -           const reason = isDPoPNonceError ? 'DPoP nonce conflict' : isTimeout ? 'timeout' : 'rate limit';
369 -           logger.info(`[File Upload] 🔄 ${reason} for ${fileName}, retrying in ${backoffMs}ms (attempt ${attempt + 1}/${maxRetries})`);
370 -           console.log(`[File Upload] 🔄 ${reason} for ${fileName}, retrying in ${backoffMs}ms`);
371 -           await new Promise(resolve => setTimeout(resolve, backoffMs));
365 +           const reason = isDPoPNonceError ? 'DPoP nonce conflict' : isTimeout ? 'timeout' : 'rate limit';
366 +           logger.info(`[File Upload] 🔄 ${reason} for ${fileName}, retrying in ${backoffMs}ms (attempt ${attempt + 1}/${maxRetries})`);
367 +           console.log(`[File Upload] 🔄 ${reason} for ${fileName}, retrying in ${backoffMs}ms`);
368 +           await new Promise(resolve => setTimeout(resolve, backoffMs));
375 -         // Log detailed error information before throwing
376 -         logger.error(`[File Upload] ❌ Upload failed for ${fileName} (size: ${content.length} bytes, mimeType: ${mimeType}, attempt: ${attempt + 1}/${maxRetries})`, {
377 -           error: error?.error || error?.message || 'Unknown error',
378 -           status: error?.status,
379 -           headers: error?.headers,
380 -           success: error?.success
382 -         console.error(`[File Upload] ❌ Upload failed for ${fileName}:`, {
383 -           error: error?.error || error?.message || 'Unknown error',
384 -           status: error?.status,
385 -           size: content.length,
387 -           attempt: attempt + 1
392 -     throw new Error(`Failed to upload ${fileName} after ${maxRetries} attempts`);
372 +         // Log detailed error information before throwing
373 +         logger.error(`[File Upload] ❌ Upload failed for ${fileName} (size: ${content.length} bytes, mimeType: ${mimeType}, attempt: ${attempt + 1}/${maxRetries})`, {
374 +           error: error?.error || error?.message || 'Unknown error',
375 +           status: error?.status,
376 +           headers: error?.headers,
377 +           success: error?.success
379 +         console.error(`[File Upload] ❌ Upload failed for ${fileName}:`, {
380 +           error: error?.error || error?.message || 'Unknown error',
381 +           status: error?.status,
382 +           size: content.length,
384 +           attempt: attempt + 1
389 +     throw new Error(`Failed to upload ${fileName} after ${maxRetries} attempts`);
-
// Use sliding window concurrency for maximum throughput
396
-
const CONCURRENCY_LIMIT = 20; // Maximum concurrent uploads
397
-
const uploadedBlobs: Array<{
398
-
result: FileUploadResult;
400
-
sentMimeType: string;
401
-
returnedMimeType: string;
404
-
const failedFiles: Array<{
392
+
// Use sliding window concurrency for maximum throughput
393
+
const CONCURRENCY_LIMIT = 20; // Maximum concurrent uploads
394
+
const uploadedBlobs: Array<{
395
+
result: FileUploadResult;
397
+
sentMimeType: string;
398
+
returnedMimeType: string;
401
+
const failedFiles: Array<{
411
-
// Process file with sliding window concurrency
412
-
const processFile = async (file: UploadedFile, index: number) => {
414
-
if (!file || !file.name) {
415
-
throw new Error(`Undefined file at index ${index}`);
408
+
// Process file with sliding window concurrency
409
+
const processFile = async (file: UploadedFile, index: number) => {
411
+
if (!file || !file.name) {
412
+
throw new Error(`Undefined file at index ${index}`);
418
-
const fileCID = computeCID(file.content);
419
-
const normalizedPath = file.name.replace(/^[^\/]*\//, '');
420
-
const existingBlob = existingBlobMap.get(normalizedPath) || existingBlobMap.get(file.name);
415
+
const fileCID = computeCID(file.content);
416
+
const normalizedPath = file.name.replace(/^[^\/]*\//, '');
417
+
const existingBlob = existingBlobMap.get(normalizedPath) || existingBlobMap.get(file.name);
422
-
if (existingBlob && existingBlob.cid === fileCID) {
423
-
logger.info(`[File Upload] ♻️ Reused: ${file.name} (unchanged, CID: ${fileCID})`);
424
-
updateJobProgress(jobId, {
425
-
filesReused: (getUploadJob(jobId)?.progress.filesReused || 0) + 1
419
+
if (existingBlob && existingBlob.cid === fileCID) {
420
+
logger.info(`[File Upload] ♻️ Reused: ${file.name} (unchanged, CID: ${fileCID})`);
421
+
updateJobProgress(jobId, {
422
+
filesReused: (getUploadJob(jobId)?.progress.filesReused || 0) + 1
430
-
hash: existingBlob.cid,
431
-
blobRef: existingBlob.blobRef,
432
-
...(file.compressed && {
433
-
encoding: 'gzip' as const,
434
-
mimeType: file.originalMimeType || file.mimeType,
435
-
base64: file.base64Encoded || false
438
-
filePath: file.name,
439
-
sentMimeType: file.mimeType,
440
-
returnedMimeType: existingBlob.blobRef.mimeType,
427
+
hash: existingBlob.cid,
428
+
blobRef: existingBlob.blobRef,
429
+
...(file.compressed && {
430
+
encoding: 'gzip' as const,
431
+
mimeType: file.originalMimeType || file.mimeType,
432
+
base64: file.base64Encoded || false
435
+
filePath: file.name,
436
+
sentMimeType: file.mimeType,
437
+
returnedMimeType: existingBlob.blobRef.mimeType,
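
Reuse is pure content addressing: if the CID computed from the new bytes matches the CID recorded for the same path in the previous manifest, the old blobRef is kept and nothing is re-uploaded. The decision in isolation (sketch; BlobEntry is an illustrative type):

// Sketch of the reuse decision above.
type BlobEntry = { blobRef: unknown; cid: string };

function decideUpload(
  path: string,
  newCid: string,                    // CID of the freshly processed content
  existing: Map<string, BlobEntry>   // path -> entry from the previous manifest
): 'reuse' | 'upload' {
  const prev = existing.get(path);
  return prev && prev.cid === newCid ? 'reuse' : 'upload';
}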
445 -     const uploadMimeType = file.compressed || file.mimeType.startsWith('text/html')
446 -       ? 'application/octet-stream'
442 +     const uploadMimeType = file.compressed || file.mimeType.startsWith('text/html')
443 +       ? 'application/octet-stream'
449 -     const compressionInfo = file.compressed ? ' (gzipped)' : '';
450 -     const fileSizeMB = (file.size / 1024 / 1024).toFixed(2);
451 -     logger.info(`[File Upload] ⬆️ Uploading: ${file.name} (${fileSizeMB}MB${compressionInfo})`);
446 +     const compressionInfo = file.compressed ? ' (gzipped)' : '';
447 +     const fileSizeMB = (file.size / 1024 / 1024).toFixed(2);
448 +     logger.info(`[File Upload] ⬆️ Uploading: ${file.name} (${fileSizeMB}MB${compressionInfo})`);
453 -     const uploadResult = await uploadBlobWithRetry(
450 +     const uploadResult = await uploadBlobWithRetry(
460 -     const returnedBlobRef = uploadResult.data.blob;
461 -     updateJobProgress(jobId, {
462 -       filesUploaded: (getUploadJob(jobId)?.progress.filesUploaded || 0) + 1
464 -     logger.info(`[File Upload] ✅ Uploaded: ${file.name} (CID: ${fileCID})`);
457 +     const returnedBlobRef = uploadResult.data.blob;
458 +     updateJobProgress(jobId, {
459 +       filesUploaded: (getUploadJob(jobId)?.progress.filesUploaded || 0) + 1
461 +     logger.info(`[File Upload] ✅ Uploaded: ${file.name} (CID: ${fileCID})`);
-
hash: returnedBlobRef.ref.toString(),
469
-
blobRef: returnedBlobRef,
470
-
...(file.compressed && {
471
-
encoding: 'gzip' as const,
472
-
mimeType: file.originalMimeType || file.mimeType,
473
-
base64: file.base64Encoded || false
476
-
filePath: file.name,
477
-
sentMimeType: file.mimeType,
478
-
returnedMimeType: returnedBlobRef.mimeType,
481
-
} catch (uploadError) {
482
-
const fileName = file?.name || 'unknown';
483
-
const fileSize = file?.size || 0;
484
-
const errorMessage = uploadError instanceof Error ? uploadError.message : 'Unknown error';
485
-
const errorDetails = {
489
-
error: errorMessage,
490
-
stack: uploadError instanceof Error ? uploadError.stack : undefined
492
-
logger.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
493
-
console.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
465
+
hash: returnedBlobRef.ref.toString(),
466
+
blobRef: returnedBlobRef,
467
+
...(file.compressed && {
468
+
encoding: 'gzip' as const,
469
+
mimeType: file.originalMimeType || file.mimeType,
470
+
base64: file.base64Encoded || false
473
+
filePath: file.name,
474
+
sentMimeType: file.mimeType,
475
+
returnedMimeType: returnedBlobRef.mimeType,
478
+
} catch (uploadError) {
479
+
const fileName = file?.name || 'unknown';
480
+
const fileSize = file?.size || 0;
481
+
const errorMessage = uploadError instanceof Error ? uploadError.message : 'Unknown error';
482
+
const errorDetails = {
486
+
error: errorMessage,
487
+
stack: uploadError instanceof Error ? uploadError.stack : undefined
489
+
logger.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
490
+
console.error(`Upload failed for file: ${fileName} (${fileSize} bytes) at index ${index}`, errorDetails);
495
-
// Track failed file but don't throw - continue with other files
499
-
error: errorMessage,
492
+
// Track failed file but don't throw - continue with other files
496
+
error: errorMessage,
503
-
return null; // Return null to indicate failure
500
+
return null; // Return null to indicate failure
507 -   // Sliding window concurrency control
508 -   const processWithConcurrency = async () => {
509 -     const results: any[] = [];
511 -     const executing = new Map<Promise<void>, { index: number; name: string }>();
504 +   // Sliding window concurrency control
505 +   const processWithConcurrency = async () => {
506 +     const results: any[] = [];
508 +     const executing = new Map<Promise<void>, { index: number; name: string }>();
513 -     for (const file of validUploadedFiles) {
514 -       const currentIndex = fileIndex++;
510 +     for (const file of validUploadedFiles) {
511 +       const currentIndex = fileIndex++;
516 -       const promise = processFile(file, currentIndex)
518 -           results[currentIndex] = result;
519 -           console.log(`[Concurrency] File ${currentIndex} (${file.name}) completed successfully`);
522 -           // This shouldn't happen since processFile catches errors, but just in case
523 -           logger.error(`Unexpected error processing file at index ${currentIndex}`, error);
524 -           console.error(`[Concurrency] File ${currentIndex} (${file.name}) had unexpected error:`, error);
525 -           results[currentIndex] = null;
528 -           executing.delete(promise);
529 -           const remaining = Array.from(executing.values()).map(f => `${f.index}:${f.name}`);
530 -           console.log(`[Concurrency] File ${currentIndex} (${file.name}) removed. Remaining ${executing.size}: [${remaining.join(', ')}]`);
513 +       const promise = processFile(file, currentIndex)
515 +           results[currentIndex] = result;
518 +           // This shouldn't happen since processFile catches errors, but just in case
519 +           logger.error(`Unexpected error processing file at index ${currentIndex}`, error);
520 +           results[currentIndex] = null;
523 +           executing.delete(promise);
533 -       executing.set(promise, { index: currentIndex, name: file.name });
534 -       const current = Array.from(executing.values()).map(f => `${f.index}:${f.name}`);
535 -       console.log(`[Concurrency] Added file ${currentIndex} (${file.name}). Total ${executing.size}: [${current.join(', ')}]`);
526 +       executing.set(promise, { index: currentIndex, name: file.name });
537 -       if (executing.size >= CONCURRENCY_LIMIT) {
538 -         console.log(`[Concurrency] Hit limit (${CONCURRENCY_LIMIT}), waiting for one to complete...`);
539 -         await Promise.race(executing.keys());
540 -         console.log(`[Concurrency] One completed, continuing. Remaining: ${executing.size}`);
528 +       if (executing.size >= CONCURRENCY_LIMIT) {
529 +         await Promise.race(executing.keys());
544 -     // Wait for remaining uploads
545 -     const remaining = Array.from(executing.values()).map(f => `${f.index}:${f.name}`);
546 -     console.log(`[Concurrency] Waiting for ${executing.size} remaining uploads: [${remaining.join(', ')}]`);
547 -     await Promise.all(executing.keys());
548 -     console.log(`[Concurrency] All uploads complete!`);
549 -     return results.filter(r => r !== undefined && r !== null); // Filter out null (failed) and undefined entries
533 +     // Wait for remaining uploads
534 +     await Promise.all(executing.keys());
535 +     return results.filter(r => r !== undefined && r !== null); // Filter out null (failed) and undefined entries
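
The sliding window keeps up to CONCURRENCY_LIMIT uploads in flight and backfills as each one settles, instead of draining fixed-size batches. Stripped of job bookkeeping, the pattern is (a generic sketch; the worker is expected to catch its own errors, as processFile does above):

// Generic sliding-window runner (sketch of the pattern above).
async function mapWithConcurrency<T, R>(
  items: T[],
  limit: number,
  worker: (item: T, index: number) => Promise<R>
): Promise<R[]> {
  const results: R[] = new Array(items.length);
  const executing = new Set<Promise<void>>();
  for (let i = 0; i < items.length; i++) {
    const p = worker(items[i], i)
      .then(r => { results[i] = r; })
      .finally(() => { executing.delete(p); });
    executing.add(p);
    if (executing.size >= limit) await Promise.race(executing); // backfill one slot
  }
  await Promise.all(executing);
  return results;
}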
552 -   const allResults = await processWithConcurrency();
553 -   uploadedBlobs.push(...allResults);
538 +   const allResults = await processWithConcurrency();
539 +   uploadedBlobs.push(...allResults);
555 -   const currentReused = uploadedBlobs.filter(b => b.reused).length;
556 -   const currentUploaded = uploadedBlobs.filter(b => !b.reused).length;
557 -   const successfulCount = uploadedBlobs.length;
558 -   const failedCount = failedFiles.length;
541 +   const currentReused = uploadedBlobs.filter(b => b.reused).length;
542 +   const currentUploaded = uploadedBlobs.filter(b => !b.reused).length;
543 +   const successfulCount = uploadedBlobs.length;
544 +   const failedCount = failedFiles.length;
560 -   logger.info(`[File Upload] 🎉 Upload complete → ${successfulCount}/${validUploadedFiles.length} files succeeded (${currentUploaded} uploaded, ${currentReused} reused), ${failedCount} failed`);
546 +   logger.info(`[File Upload] 🎉 Upload complete → ${successfulCount}/${validUploadedFiles.length} files succeeded (${currentUploaded} uploaded, ${currentReused} reused), ${failedCount} failed`);
562 -   if (failedCount > 0) {
563 -     logger.warn(`[File Upload] ⚠️ Failed files:`, failedFiles);
564 -     console.warn(`[File Upload] ⚠️ ${failedCount} files failed to upload:`, failedFiles.map(f => f.name).join(', '));
548 +   if (failedCount > 0) {
549 +     logger.warn(`[File Upload] ⚠️ Failed files:`, failedFiles);
550 +     console.warn(`[File Upload] ⚠️ ${failedCount} files failed to upload:`, failedFiles.map(f => f.name).join(', '));
567 -   const reusedCount = uploadedBlobs.filter(b => b.reused).length;
568 -   const uploadedCount = uploadedBlobs.filter(b => !b.reused).length;
569 -   logger.info(`[File Upload] 🎉 Upload phase complete! Total: ${successfulCount} files (${uploadedCount} uploaded, ${reusedCount} reused)`);
553 +   const reusedCount = uploadedBlobs.filter(b => b.reused).length;
554 +   const uploadedCount = uploadedBlobs.filter(b => !b.reused).length;
555 +   logger.info(`[File Upload] 🎉 Upload phase complete! Total: ${successfulCount} files (${uploadedCount} uploaded, ${reusedCount} reused)`);
571 -   const uploadResults: FileUploadResult[] = uploadedBlobs.map(blob => blob.result);
572 -   const filePaths: string[] = uploadedBlobs.map(blob => blob.filePath);
557 +   const uploadResults: FileUploadResult[] = uploadedBlobs.map(blob => blob.result);
558 +   const filePaths: string[] = uploadedBlobs.map(blob => blob.filePath);
574 -   // Update directory with file blobs
575 -   console.log('Updating directory with blob references...');
576 -   updateJobProgress(jobId, { phase: 'creating_manifest' });
577 -   const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths);
560 +   // Update directory with file blobs
561 +   console.log('Updating directory with blob references...');
562 +   updateJobProgress(jobId, { phase: 'creating_manifest' });
563 +   const updatedDirectory = updateFileBlobs(directory, uploadResults, filePaths);
579 -   // Check if we need to split into subfs records
580 -   // Split proactively if we have lots of files to avoid hitting manifest size limits
581 -   const MAX_MANIFEST_SIZE = 140 * 1024; // 140KB to be safe (PDS limit is 150KB)
582 -   const FILE_COUNT_THRESHOLD = 250; // Start splitting early
583 -   const subfsRecords: Array<{ uri: string; path: string }> = [];
584 -   let workingDirectory = updatedDirectory;
585 -   let currentFileCount = fileCount;
565 +   // Check if we need to split into subfs records
566 +   // Split proactively if we have lots of files to avoid hitting manifest size limits
567 +   const MAX_MANIFEST_SIZE = 140 * 1024; // 140KB to be safe (PDS limit is 150KB)
568 +   const FILE_COUNT_THRESHOLD = 250; // Start splitting early
569 +   const subfsRecords: Array<{ uri: string; path: string }> = [];
570 +   let workingDirectory = updatedDirectory;
571 +   let currentFileCount = fileCount;
587 -   // Create initial manifest to check size
588 -   let manifest = createManifest(siteName, workingDirectory, fileCount);
589 -   let manifestSize = JSON.stringify(manifest).length;
573 +   // Create initial manifest to check size
574 +   let manifest = createManifest(siteName, workingDirectory, fileCount);
575 +   let manifestSize = JSON.stringify(manifest).length;
591 -   // Split if we have lots of files OR if manifest is already too large
592 -   if (fileCount >= FILE_COUNT_THRESHOLD || manifestSize > MAX_MANIFEST_SIZE) {
593 -     console.log(`⚠️ Large site detected (${fileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
594 -     logger.info(`Large site with ${fileCount} files, splitting into subfs records`);
577 +   // Split if we have lots of files OR if manifest is already too large
578 +   if (fileCount >= FILE_COUNT_THRESHOLD || manifestSize > MAX_MANIFEST_SIZE) {
579 +     console.log(`⚠️ Large site detected (${fileCount} files, ${(manifestSize / 1024).toFixed(1)}KB), splitting into subfs records...`);
580 +     logger.info(`Large site with ${fileCount} files, splitting into subfs records`);
596 -     // Keep splitting until manifest fits under limit
598 -     const MAX_ATTEMPTS = 100; // Allow many splits for very large sites
582 +     // Keep splitting until manifest fits under limit
584 +     const MAX_ATTEMPTS = 100; // Allow many splits for very large sites
600 -     while (manifestSize > MAX_MANIFEST_SIZE && attempts < MAX_ATTEMPTS) {
586 +     while (manifestSize > MAX_MANIFEST_SIZE && attempts < MAX_ATTEMPTS) {
603 -       // Find all directories sorted by size (largest first)
604 -       const directories = findLargeDirectories(workingDirectory);
605 -       directories.sort((a, b) => b.size - a.size);
589 +       // Find all directories sorted by size (largest first)
590 +       const directories = findLargeDirectories(workingDirectory);
591 +       directories.sort((a, b) => b.size - a.size);
607 -       if (directories.length === 0) {
608 -         // No more directories to split - this should be very rare
610 -           `Cannot split manifest further - no subdirectories available. ` +
611 -           `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
612 -           `Try organizing files into subdirectories.`
593 +       if (directories.length === 0) {
594 +         // No more directories to split - this should be very rare
596 +           `Cannot split manifest further - no subdirectories available. ` +
597 +           `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
598 +           `Try organizing files into subdirectories.`
616 -       // Pick the largest directory
617 -       const largestDir = directories[0];
618 -       console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
602 +       // Pick the largest directory
603 +       const largestDir = directories[0];
604 +       console.log(` Split #${attempts}: ${largestDir.path} (${largestDir.fileCount} files, ${(largestDir.size / 1024).toFixed(1)}KB)`);
620 -       // Create a subfs record for this directory
621 -       const subfsRkey = TID.nextStr();
622 -       const subfsManifest = {
623 -         $type: 'place.wisp.subfs' as const,
624 -         root: largestDir.directory,
625 -         fileCount: largestDir.fileCount,
626 -         createdAt: new Date().toISOString()
606 +       // Create a subfs record for this directory
607 +       const subfsRkey = TID.nextStr();
608 +       const subfsManifest = {
609 +         $type: 'place.wisp.subfs' as const,
610 +         root: largestDir.directory,
611 +         fileCount: largestDir.fileCount,
612 +         createdAt: new Date().toISOString()
629 -       // Validate subfs record
630 -       const subfsValidation = validateSubfsRecord(subfsManifest);
631 -       if (!subfsValidation.success) {
632 -         throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
615 +       // Validate subfs record
616 +       const subfsValidation = validateSubfsRecord(subfsManifest);
617 +       if (!subfsValidation.success) {
618 +         throw new Error(`Invalid subfs manifest: ${subfsValidation.error?.message || 'Validation failed'}`);
635 -       // Upload subfs record to PDS
636 -       const subfsRecord = await agent.com.atproto.repo.putRecord({
638 -         collection: 'place.wisp.subfs',
640 -         record: subfsManifest
621 +       // Upload subfs record to PDS
622 +       const subfsRecord = await agent.com.atproto.repo.putRecord({
624 +         collection: 'place.wisp.subfs',
626 +         record: subfsManifest
643 -       const subfsUri = subfsRecord.data.uri;
644 -       subfsRecords.push({ uri: subfsUri, path: largestDir.path });
645 -       console.log(` ✅ Created subfs: ${subfsUri}`);
646 -       logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`);
629 +       const subfsUri = subfsRecord.data.uri;
630 +       subfsRecords.push({ uri: subfsUri, path: largestDir.path });
631 +       console.log(` ✅ Created subfs: ${subfsUri}`);
632 +       logger.info(`Created subfs record for ${largestDir.path}: ${subfsUri}`);
648 -       // Replace directory with subfs node in the main tree
649 -       workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
634 +       // Replace directory with subfs node in the main tree
635 +       workingDirectory = replaceDirectoryWithSubfs(workingDirectory, largestDir.path, subfsUri);
651 -       // Recreate manifest and check new size
652 -       currentFileCount -= largestDir.fileCount;
653 -       manifest = createManifest(siteName, workingDirectory, fileCount);
654 -       manifestSize = JSON.stringify(manifest).length;
655 -       const newSizeKB = (manifestSize / 1024).toFixed(1);
656 -       console.log(` → Manifest now ${newSizeKB}KB with ${currentFileCount} files (${subfsRecords.length} subfs total)`);
637 +       // Recreate manifest and check new size
638 +       currentFileCount -= largestDir.fileCount;
639 +       manifest = createManifest(siteName, workingDirectory, fileCount);
640 +       manifestSize = JSON.stringify(manifest).length;
641 +       const newSizeKB = (manifestSize / 1024).toFixed(1);
642 +       console.log(` → Manifest now ${newSizeKB}KB with ${currentFileCount} files (${subfsRecords.length} subfs total)`);
658 -       // Check if we're under the limit now
659 -       if (manifestSize <= MAX_MANIFEST_SIZE) {
660 -         console.log(` ✅ Manifest fits! (${newSizeKB}KB < 140KB)`);
644 +       // Check if we're under the limit now
645 +       if (manifestSize <= MAX_MANIFEST_SIZE) {
646 +         console.log(` ✅ Manifest fits! (${newSizeKB}KB < 140KB)`);
665 -     if (manifestSize > MAX_MANIFEST_SIZE) {
667 -         `Failed to fit manifest after splitting ${attempts} directories. ` +
668 -         `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
669 -         `This should never happen - please report this issue.`
651 +     if (manifestSize > MAX_MANIFEST_SIZE) {
653 +         `Failed to fit manifest after splitting ${attempts} directories. ` +
654 +         `Current size: ${(manifestSize / 1024).toFixed(1)}KB. ` +
655 +         `This should never happen - please report this issue.`
673 -     console.log(`✅ Split complete: ${subfsRecords.length} subfs records, ${currentFileCount} files in main, ${(manifestSize / 1024).toFixed(1)}KB manifest`);
674 -     logger.info(`Split into ${subfsRecords.length} subfs records, ${currentFileCount} files remaining in main tree`);
676 -     const manifestSizeKB = (manifestSize / 1024).toFixed(1);
677 -     console.log(`Manifest created (${fileCount} files, ${manifestSizeKB}KB JSON) - no splitting needed`);
659 +     console.log(`✅ Split complete: ${subfsRecords.length} subfs records, ${currentFileCount} files in main, ${(manifestSize / 1024).toFixed(1)}KB manifest`);
660 +     logger.info(`Split into ${subfsRecords.length} subfs records, ${currentFileCount} files remaining in main tree`);
662 +     const manifestSizeKB = (manifestSize / 1024).toFixed(1);
663 +     console.log(`Manifest created (${fileCount} files, ${manifestSizeKB}KB JSON) - no splitting needed`);
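
The split is greedy: while the serialized manifest exceeds the 140KB budget, carve the largest remaining directory into its own place.wisp.subfs record and mount it by URI. As a compressed sketch (putSubfsRecord is a hypothetical wrapper over the putRecord call above):

// Greedy split loop in sketch form.
while (JSON.stringify(manifest).length > MAX_MANIFEST_SIZE) {
  const dirs = findLargeDirectories(workingDirectory).sort((a, b) => b.size - a.size);
  if (dirs.length === 0) throw new Error('nothing left to split');
  const uri = await putSubfsRecord(dirs[0].directory);       // hypothetical helper
  workingDirectory = replaceDirectoryWithSubfs(workingDirectory, dirs[0].path, uri);
  manifest = createManifest(siteName, workingDirectory, fileCount);
}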
680 -   const rkey = siteName;
681 -   updateJobProgress(jobId, { phase: 'finalizing' });
666 +   const rkey = siteName;
667 +   updateJobProgress(jobId, { phase: 'finalizing' });
683 -   console.log('Putting record to PDS with rkey:', rkey);
684 -   const record = await agent.com.atproto.repo.putRecord({
686 -     collection: 'place.wisp.fs',
690 -   console.log('Record successfully created on PDS:', record.data.uri);
669 +   console.log('Putting record to PDS with rkey:', rkey);
670 +   const record = await agent.com.atproto.repo.putRecord({
672 +     collection: 'place.wisp.fs',
676 +   console.log('Record successfully created on PDS:', record.data.uri);
692 -   // Store site in database cache
693 -   await upsertSite(did, rkey, siteName);
678 +   // Store site in database cache
679 +   await upsertSite(did, rkey, siteName);
695 -   // Clean up old subfs records if we had any
696 -   if (oldSubfsUris.length > 0) {
697 -     console.log(`Cleaning up ${oldSubfsUris.length} old subfs records...`);
698 -     logger.info(`Cleaning up ${oldSubfsUris.length} old subfs records`);
681 +   // Clean up old subfs records if we had any
682 +   if (oldSubfsUris.length > 0) {
683 +     console.log(`Cleaning up ${oldSubfsUris.length} old subfs records...`);
684 +     logger.info(`Cleaning up ${oldSubfsUris.length} old subfs records`);
700 -     // Delete old subfs records in parallel (don't wait for completion)
702 -       oldSubfsUris.map(async ({ uri }) => {
704 -         // Parse URI: at://did/collection/rkey
705 -         const parts = uri.replace('at://', '').split('/');
706 -         const subRkey = parts[2];
686 +     // Delete old subfs records in parallel (don't wait for completion)
688 +       oldSubfsUris.map(async ({ uri }) => {
690 +         // Parse URI: at://did/collection/rkey
691 +         const parts = uri.replace('at://', '').split('/');
692 +         const subRkey = parts[2];
708 -         await agent.com.atproto.repo.deleteRecord({
710 -           collection: 'place.wisp.subfs',
694 +         await agent.com.atproto.repo.deleteRecord({
696 +           collection: 'place.wisp.subfs',
714 -         console.log(` 🗑️ Deleted old subfs: ${uri}`);
715 -         logger.info(`Deleted old subfs record: ${uri}`);
716 -       } catch (err: any) {
717 -         // Don't fail the whole upload if cleanup fails
718 -         console.warn(`Failed to delete old subfs ${uri}:`, err?.message);
719 -         logger.warn(`Failed to delete old subfs ${uri}`, err);
723 -       // Log but don't fail if cleanup fails
724 -       logger.warn('Some subfs cleanup operations failed', err);
700 +         console.log(` 🗑️ Deleted old subfs: ${uri}`);
701 +         logger.info(`Deleted old subfs record: ${uri}`);
702 +       } catch (err: any) {
703 +         // Don't fail the whole upload if cleanup fails
704 +         console.warn(`Failed to delete old subfs ${uri}:`, err?.message);
705 +         logger.warn(`Failed to delete old subfs ${uri}`, err);
709 +       // Log but don't fail if cleanup fails
710 +       logger.warn('Some subfs cleanup operations failed', err);
728 -   completeUploadJob(jobId, {
730 -     uri: record.data.uri,
731 -     cid: record.data.cid,
736 -     uploadedCount: validUploadedFiles.length - failedFiles.length,
737 -     hasFailures: failedFiles.length > 0
714 +   completeUploadJob(jobId, {
716 +     uri: record.data.uri,
717 +     cid: record.data.cid,
722 +     uploadedCount: validUploadedFiles.length - failedFiles.length,
723 +     hasFailures: failedFiles.length > 0
740 -   console.log('=== UPLOAD FILES COMPLETE ===');
742 -   console.error('=== UPLOAD ERROR ===');
743 -   console.error('Error details:', error);
744 -   logger.error('Upload error', error);
745 -   failUploadJob(jobId, error instanceof Error ? error.message : 'Unknown error');
726 +   console.log('=== UPLOAD FILES COMPLETE ===');
728 +   console.error('=== UPLOAD ERROR ===');
729 +   console.error('Error details:', error);
730 +   logger.error('Upload error', error);
731 +   failUploadJob(jobId, error instanceof Error ? error.message : 'Unknown error');
export const wispRoutes = (client: NodeOAuthClient, cookieSecret: string) =>
753 -     secrets: cookieSecret,
757 -   .derive(async ({ cookie }) => {
758 -     const auth = await requireAuth(client, cookie)
762 -     '/upload-progress/:jobId',
763 -     async ({ params: { jobId }, auth, set }) => {
764 -       const job = getUploadJob(jobId);
739 +     secrets: cookieSecret,
743 +   .derive(async ({ cookie }) => {
744 +     const auth = await requireAuth(client, cookie)
748 +     '/upload-progress/:jobId',
749 +     async ({ params: { jobId }, auth, set }) => {
750 +       const job = getUploadJob(jobId);
768 -         return { error: 'Job not found' };
754 +         return { error: 'Job not found' };
771 -       // Verify job belongs to authenticated user
772 -       if (job.did !== auth.did) {
774 -         return { error: 'Unauthorized' };
757 +       // Verify job belongs to authenticated user
758 +       if (job.did !== auth.did) {
760 +         return { error: 'Unauthorized' };
777 -       // Set up SSE headers
779 -         'Content-Type': 'text/event-stream',
780 -         'Cache-Control': 'no-cache',
781 -         'Connection': 'keep-alive'
763 +       // Set up SSE headers
765 +         'Content-Type': 'text/event-stream',
766 +         'Cache-Control': 'no-cache',
767 +         'Connection': 'keep-alive'
784 -       const stream = new ReadableStream({
785 -         start(controller) {
786 -           const encoder = new TextEncoder();
770 +       const stream = new ReadableStream({
771 +         start(controller) {
772 +           const encoder = new TextEncoder();
788 -           // Send initial state
789 -           const sendEvent = (event: string, data: any) => {
791 -             const message = `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`;
792 -             controller.enqueue(encoder.encode(message));
794 -             // Controller closed, ignore
774 +           // Send initial state
775 +           const sendEvent = (event: string, data: any) => {
777 +             const message = `event: ${event}\ndata: ${JSON.stringify(data)}\n\n`;
778 +             controller.enqueue(encoder.encode(message));
780 +             // Controller closed, ignore
798 -           // Send keepalive comment every 15 seconds to prevent timeout
799 -           const keepaliveInterval = setInterval(() => {
801 -             controller.enqueue(encoder.encode(': keepalive\n\n'));
803 -             // Controller closed, stop sending keepalives
804 -             clearInterval(keepaliveInterval);
784 +           // Send keepalive comment every 15 seconds to prevent timeout
785 +           const keepaliveInterval = setInterval(() => {
787 +             controller.enqueue(encoder.encode(': keepalive\n\n'));
789 +             // Controller closed, stop sending keepalives
790 +             clearInterval(keepaliveInterval);
808 -           // Send current job state immediately
809 -           sendEvent('progress', {
810 -             status: job.status,
811 -             progress: job.progress,
812 -             result: job.result,
794 +           // Send current job state immediately
795 +           sendEvent('progress', {
796 +             status: job.status,
797 +             progress: job.progress,
798 +             result: job.result,
816 -           // If job is already completed or failed, close the stream
817 -           if (job.status === 'completed' || job.status === 'failed') {
818 -             clearInterval(keepaliveInterval);
819 -             controller.close();
802 +           // If job is already completed or failed, close the stream
803 +           if (job.status === 'completed' || job.status === 'failed') {
804 +             clearInterval(keepaliveInterval);
805 +             controller.close();
823 -           // Listen for updates
824 -           const cleanup = addJobListener(jobId, (event, data) => {
825 -             sendEvent(event, data);
809 +           // Listen for updates
810 +           const cleanup = addJobListener(jobId, (event, data) => {
811 +             sendEvent(event, data);
827 -             // Close stream after done or error event
828 -             if (event === 'done' || event === 'error') {
829 -               clearInterval(keepaliveInterval);
832 -               controller.close();
813 +             // Close stream after done or error event
814 +             if (event === 'done' || event === 'error') {
815 +               clearInterval(keepaliveInterval);
818 +               controller.close();
840 -           // Cleanup on disconnect
842 -           clearInterval(keepaliveInterval);
826 +           // Cleanup on disconnect
828 +           clearInterval(keepaliveInterval);
848 -       return new Response(stream);
853 -     async ({ body, auth }) => {
854 -       const { siteName, files } = body as {
856 -         files: File | File[]
834 +       return new Response(stream);
839 +     async ({ body, auth }) => {
840 +       const { siteName, files } = body as {
842 +         files: File | File[]
-
console.log('=== UPLOAD FILES START ===');
860
-
console.log('Site name:', siteName);
861
-
console.log('Files received:', Array.isArray(files) ? files.length : 'single file');
845
+
console.log('=== UPLOAD FILES START ===');
846
+
console.log('Site name:', siteName);
847
+
console.log('Files received:', Array.isArray(files) ? files.length : 'single file');
865
-
throw new Error('Site name is required')
851
+
throw new Error('Site name is required')
868
-
if (!isValidSiteName(siteName)) {
869
-
throw new Error('Invalid site name: must be 1-512 characters and contain only alphanumeric, dots, dashes, underscores, tildes, and colons')
854
+
if (!isValidSiteName(siteName)) {
855
+
throw new Error('Invalid site name: must be 1-512 characters and contain only alphanumeric, dots, dashes, underscores, tildes, and colons')
872
-
// Check if files were provided
873
-
const hasFiles = files && (Array.isArray(files) ? files.length > 0 : !!files);
858
+
// Check if files were provided
859
+
const hasFiles = files && (Array.isArray(files) ? files.length > 0 : !!files);
876
-
// Handle empty upload synchronously (fast operation)
877
-
const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
862
+
// Handle empty upload synchronously (fast operation)
863
+
const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
879
-
const emptyManifest = {
880
-
$type: 'place.wisp.fs',
887
-
createdAt: new Date().toISOString()
865
+
const emptyManifest = {
866
+
$type: 'place.wisp.fs',
873
+
createdAt: new Date().toISOString()
890
-
const validationResult = validateRecord(emptyManifest);
891
-
if (!validationResult.success) {
892
-
throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
876
+
const validationResult = validateRecord(emptyManifest);
877
+
if (!validationResult.success) {
878
+
throw new Error(`Invalid manifest: ${validationResult.error?.message || 'Validation failed'}`);
895
-
const rkey = siteName;
881
+
const rkey = siteName;
897
-
const record = await agent.com.atproto.repo.putRecord({
899
-
collection: 'place.wisp.fs',
901
-
record: emptyManifest
883
+
const record = await agent.com.atproto.repo.putRecord({
885
+
collection: 'place.wisp.fs',
887
+
record: emptyManifest
904
-
await upsertSite(auth.did, rkey, siteName);
890
+
await upsertSite(auth.did, rkey, siteName);
908
-
uri: record.data.uri,
909
-
cid: record.data.cid,
894
+
uri: record.data.uri,
895
+
cid: record.data.cid,
915
-
// For file uploads, create a job and process in background
916
-
const fileArray = Array.isArray(files) ? files : [files];
917
-
const jobId = createUploadJob(auth.did, siteName, fileArray.length);
901
+
// For file uploads, create a job and process in background
902
+
const fileArray = Array.isArray(files) ? files : [files];
903
+
const jobId = createUploadJob(auth.did, siteName, fileArray.length);
919
-
// Create agent with OAuth session
920
-
const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
921
-
console.log('Agent created for DID:', auth.did);
922
-
console.log('Created upload job:', jobId);
905
+
// Create agent with OAuth session
906
+
const agent = new Agent((url, init) => auth.session.fetchHandler(url, init))
907
+
console.log('Agent created for DID:', auth.did);
908
+
console.log('Created upload job:', jobId);
924
-
// Start background processing (don't await)
925
-
processUploadInBackground(jobId, agent, auth.did, siteName, fileArray).catch(err => {
926
-
console.error('Background upload process failed:', err);
927
-
logger.error('Background upload process failed', err);
910
+
// Start background processing (don't await)
911
+
processUploadInBackground(jobId, agent, auth.did, siteName, fileArray).catch(err => {
912
+
console.error('Background upload process failed:', err);
913
+
logger.error('Background upload process failed', err);
930
-
// Return immediately with job ID
934
-
message: 'Upload started. Connect to /wisp/upload-progress/' + jobId + ' for progress updates.'
937
-
console.error('=== UPLOAD ERROR ===');
938
-
console.error('Error details:', error);
939
-
logger.error('Upload error', error);
940
-
throw new Error(`Failed to upload files: ${error instanceof Error ? error.message : 'Unknown error'}`);
916
+
// Return immediately with job ID
920
+
message: 'Upload started. Connect to /wisp/upload-progress/' + jobId + ' for progress updates.'
923
+
console.error('=== UPLOAD ERROR ===');
924
+
console.error('Error details:', error);
925
+
logger.error('Upload error', error);
926
+
throw new Error(`Failed to upload files: ${error instanceof Error ? error.message : 'Unknown error'}`);
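
End to end, a client makes one multipart POST and then follows the returned job. The POST path and response field below are assumptions for illustration (only the progress path is visible in this diff, via the message above):

// Client-side kickoff sketch. '/wisp/upload' and `jobId` are assumed names.
const form = new FormData();
form.append('siteName', 'my-site');
for (const file of selectedFiles) form.append('files', file);

const res = await fetch('/wisp/upload', { method: 'POST', body: form });
const { jobId } = await res.json();
// ...then subscribe to `/wisp/upload-progress/${jobId}` as shown earlier.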