Change file uploads to write chunks into a single file instead of storing each chunk separately

This commit is contained in: static
2026-03-10 22:44:11 +09:00
parent c2874035ba
commit 7f68f6d580
10 changed files with 105 additions and 121 deletions
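In short: instead of writing each chunk to its own file under the session directory, every chunk is now written at a fixed offset into a single session file, so chunks may arrive in any order, retries overwrite the same byte range, and the final chunk truncates the file to its exact size. A minimal sketch of that layout, not the project's actual code (CHUNK_SIZE and writeChunk are illustrative stand-ins; it assumes the session file is created when the upload session starts):

import { open } from "fs/promises";

const CHUNK_SIZE = 8 * 1024 * 1024; // stand-in for ENCRYPTED_CHUNK_SIZE

// Chunk i (1-based) always occupies the same byte range of the one file,
// so out-of-order uploads and retries are plain positioned writes.
const writeChunk = async (path: string, chunkIndex: number, data: Buffer) => {
	const file = await open(path, "r+"); // assumes the file already exists
	try {
		await file.write(data, 0, data.length, (chunkIndex - 1) * CHUNK_SIZE);
	} finally {
		await file.close();
	}
};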


@@ -1,10 +1,9 @@
 import { error } from "@sveltejs/kit";
-import { createHash } from "crypto";
-import { createWriteStream } from "fs";
+import { open } from "fs/promises";
 import { Readable } from "stream";
 import { ENCRYPTION_OVERHEAD, ENCRYPTED_CHUNK_SIZE } from "$lib/constants";
 import { UploadRepo } from "$lib/server/db";
-import { safeRecursiveRm, safeUnlink } from "$lib/server/modules/filesystem";
+import { safeUnlink } from "$lib/server/modules/filesystem";
 
 const chunkLocks = new Set<string>();
 
@@ -14,12 +13,61 @@ const isChunkUploaded = (bitmap: Buffer, chunkIndex: number) => {
 	return !!byte && (byte & (1 << (chunkIndex % 8))) !== 0; // Postgres sucks
 };
 
+const writeChunkAtOffset = async (
+	path: string,
+	encChunkStream: Readable,
+	chunkIndex: number,
+	isLastChunk: boolean,
+) => {
+	const offset = (chunkIndex - 1) * ENCRYPTED_CHUNK_SIZE;
+	const file = await open(path, "r+");
+	let written = 0;
+	try {
+		for await (const chunk of encChunkStream) {
+			const buffer = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk);
+			written += buffer.length;
+			if (written > ENCRYPTED_CHUNK_SIZE) {
+				throw new Error("Invalid chunk size");
+			}
+
+			let chunkOffset = 0;
+			while (chunkOffset < buffer.length) {
+				const { bytesWritten } = await file.write(
+					buffer,
+					chunkOffset,
+					buffer.length - chunkOffset,
+					offset + written - buffer.length + chunkOffset,
+				);
+				if (bytesWritten <= 0) {
+					throw new Error("Failed to write chunk");
+				}
+				chunkOffset += bytesWritten;
+			}
+		}
+
+		if (
+			(!isLastChunk && written !== ENCRYPTED_CHUNK_SIZE) ||
+			(isLastChunk && (written <= ENCRYPTION_OVERHEAD || written > ENCRYPTED_CHUNK_SIZE))
+		) {
+			throw new Error("Invalid chunk size");
+		}
+
+		if (isLastChunk) {
+			await file.truncate(offset + written);
+		}
+
+		return written;
+	} finally {
+		await file.close();
+	}
+};
 
 export const uploadChunk = async (
 	userId: number,
 	sessionId: string,
 	chunkIndex: number,
 	encChunkStream: Readable,
 	encChunkHash: string,
 ) => {
 	const lockKey = `${sessionId}/${chunkIndex}`;
 	if (chunkLocks.has(lockKey)) {
@@ -28,8 +76,6 @@ export const uploadChunk = async (
 		chunkLocks.add(lockKey);
 	}
 
-	let filePath;
-
 	try {
 		const session = await UploadRepo.getUploadSession(sessionId, userId);
 		if (!session) {
@@ -41,39 +87,10 @@ export const uploadChunk = async (
 		}
 
 		const isLastChunk = chunkIndex === session.totalChunks;
-		filePath = `${session.path}/${chunkIndex}`;
-
-		const hashStream = createHash("sha256");
-		const writeStream = createWriteStream(filePath, { flags: "wx", mode: 0o600 });
-
-		let writtenBytes = 0;
-
-		for await (const chunk of encChunkStream) {
-			hashStream.update(chunk);
-			writeStream.write(chunk);
-			writtenBytes += chunk.length;
-		}
-
-		await new Promise<void>((resolve, reject) => {
-			writeStream.end((e: any) => (e ? reject(e) : resolve()));
-		});
-
-		if (hashStream.digest("base64") !== encChunkHash) {
-			throw new Error("Invalid checksum");
-		} else if (
-			(!isLastChunk && writtenBytes !== ENCRYPTED_CHUNK_SIZE) ||
-			(isLastChunk && (writtenBytes <= ENCRYPTION_OVERHEAD || writtenBytes > ENCRYPTED_CHUNK_SIZE))
-		) {
-			throw new Error("Invalid chunk size");
-		}
-
+		await writeChunkAtOffset(session.path, encChunkStream, chunkIndex, isLastChunk);
 		await UploadRepo.markChunkAsUploaded(sessionId, chunkIndex);
 	} catch (e) {
-		await safeUnlink(filePath);
-		if (
-			e instanceof Error &&
-			(e.message === "Invalid checksum" || e.message === "Invalid chunk size")
-		) {
+		if (e instanceof Error && e.message === "Invalid chunk size") {
 			error(400, "Invalid request body");
 		}
 		throw e;
@@ -84,5 +101,5 @@ export const uploadChunk = async (
 export const cleanupExpiredUploadSessions = async () => {
 	const paths = await UploadRepo.cleanupExpiredUploadSessions();
-	await Promise.all(paths.map(safeRecursiveRm));
+	await Promise.all(paths.map(safeUnlink));
 };
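For context on the progress bookkeeping both hunks rely on: isChunkUploaded tests one bit per chunk in a bitmap that UploadRepo appears to keep in Postgres (the comment on the bit test suggests as much), and markChunkAsUploaded sets the corresponding bit. The byte lookup sits outside the shown hunk, so this sketch fills it in as an assumption consistent with the visible bit test (1-based chunkIndex, bit chunkIndex % 8):

const isChunkUploaded = (bitmap: Buffer, chunkIndex: number) => {
	const byte = bitmap[Math.floor(chunkIndex / 8)]; // assumed byte indexing
	return !!byte && (byte & (1 << (chunkIndex % 8))) !== 0;
};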