v2.admin.gaertan.art/src/actions/artworks/timelapse.ts

"use server";
import { prisma } from "@/lib/prisma";
import { s3 } from "@/lib/s3";
import {
confirmArtworkTimelapseUploadSchema,
createArtworkTimelapseUploadSchema,
deleteArtworkTimelapseSchema,
setArtworkTimelapseEnabledSchema,
} from "@/schemas/artworks/timelapse";
import type {
ConfirmArtworkTimelapseUploadInput,
CreateArtworkTimelapseUploadInput,
DeleteArtworkTimelapseInput,
SetArtworkTimelapseEnabledInput,
} from "@/schemas/artworks/timelapse";
import { PutObjectCommand, DeleteObjectCommand } from "@aws-sdk/client-s3";
import { getSignedUrl } from "@aws-sdk/s3-request-presigner";
import { revalidatePath } from "next/cache";
import { v4 as uuidv4 } from "uuid";
/**
 * Creates a presigned PUT URL so the client can upload large timelapse videos
 * directly to S3 (avoids Next.js body-size/proxy limits).
 */
export async function createArtworkTimelapseUpload(input: CreateArtworkTimelapseUploadInput) {
  const { artworkId, fileName, mimeType, sizeBytes } =
    createArtworkTimelapseUploadSchema.parse(input);

  const ext = fileName.includes(".") ? fileName.split(".").pop() : undefined;
  const suffix = ext ? `.${ext}` : "";
  // A fresh UUID per upload keeps every key unique, so CDNs and browser caches
  // can never serve a stale object under a reused key.
  const fileId = uuidv4();
  const s3Key = `timelapse/${artworkId}/${fileId}${suffix}`;

  const cmd = new PutObjectCommand({
    Bucket: process.env.BUCKET_NAME,
    Key: s3Key,
    ContentType: mimeType,
    // A presigned PUT does not strictly enforce object size; hard enforcement at
    // the S3 level would need e.g. a presigned POST policy with a
    // content-length-range condition.
  });
  const uploadUrl = await getSignedUrl(s3, cmd, { expiresIn: 60 * 5 });

  return { uploadUrl, s3Key, fileName, mimeType, sizeBytes };
}
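
/*
 * Example client-side flow (an illustrative sketch, not part of this module;
 * it assumes an `artworkId` and a `file: File` taken from an <input type="file">):
 *
 *   const { uploadUrl, s3Key, fileName, mimeType, sizeBytes } =
 *     await createArtworkTimelapseUpload({
 *       artworkId,
 *       fileName: file.name,
 *       mimeType: file.type,
 *       sizeBytes: file.size,
 *     });
 *   // The Content-Type header must match the ContentType the URL was signed with.
 *   const res = await fetch(uploadUrl, {
 *     method: "PUT",
 *     headers: { "Content-Type": mimeType },
 *     body: file,
 *   });
 *   if (!res.ok) throw new Error(`Upload failed: HTTP ${res.status}`);
 *   await confirmArtworkTimelapseUpload({ artworkId, s3Key, fileName, mimeType, sizeBytes });
 */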

/** Persists uploaded timelapse metadata in the DB (upsert keyed by artworkId). */
export async function confirmArtworkTimelapseUpload(input: ConfirmArtworkTimelapseUploadInput) {
  const { artworkId, s3Key, fileName, mimeType, sizeBytes } =
    confirmArtworkTimelapseUploadSchema.parse(input);

  // If an older timelapse exists under a different key, delete the old object
  // so replaced uploads don't leak storage.
  const existing = await prisma.artworkTimelapse.findUnique({ where: { artworkId } });
  if (existing?.s3Key && existing.s3Key !== s3Key) {
    try {
      await s3.send(
        new DeleteObjectCommand({
          Bucket: process.env.BUCKET_NAME,
          Key: existing.s3Key,
        })
      );
    } catch (err) {
      // Best-effort cleanup: don't fail the request if the delete fails.
      console.warn("Failed to delete previous timelapse object", existing.s3Key, err);
    }
  }

  await prisma.artworkTimelapse.upsert({
    where: { artworkId },
    create: {
      artworkId,
      s3Key,
      fileName,
      mimeType,
      sizeBytes,
      enabled: true,
    },
    update: {
      s3Key,
      fileName,
      mimeType,
      sizeBytes,
    },
  });

  revalidatePath(`/artworks/${artworkId}`);
  return { ok: true };
}
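
/*
 * Possible hardening (a sketch, not implemented here): a presigned PUT cannot
 * prove the client actually uploaded anything, so the confirm step could first
 * verify the object, e.g. with HeadObjectCommand from @aws-sdk/client-s3:
 *
 *   const head = await s3.send(
 *     new HeadObjectCommand({ Bucket: process.env.BUCKET_NAME, Key: s3Key })
 *   );
 *   if (head.ContentLength !== sizeBytes) throw new Error("Size mismatch");
 */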

/** Toggles the timelapse's enabled flag. */
export async function setArtworkTimelapseEnabled(input: SetArtworkTimelapseEnabledInput) {
  const { artworkId, enabled } = setArtworkTimelapseEnabledSchema.parse(input);

  await prisma.artworkTimelapse.update({
    where: { artworkId },
    data: { enabled },
  });

  revalidatePath(`/artworks/${artworkId}`);
  return { ok: true };
}

/** Deletes the timelapse object from S3 (best-effort) and removes its DB row. */
export async function deleteArtworkTimelapse(input: DeleteArtworkTimelapseInput) {
  const { artworkId } = deleteArtworkTimelapseSchema.parse(input);

  const existing = await prisma.artworkTimelapse.findUnique({ where: { artworkId } });
  if (!existing) return { ok: true };

  try {
    await s3.send(
      new DeleteObjectCommand({
        Bucket: process.env.BUCKET_NAME,
        Key: existing.s3Key,
      })
    );
  } catch (err) {
    // Best-effort cleanup: remove the DB row even if the S3 delete fails.
    console.warn("Failed to delete timelapse object", existing.s3Key, err);
  }

  await prisma.artworkTimelapse.delete({ where: { artworkId } });
  revalidatePath(`/artworks/${artworkId}`);
  return { ok: true };
}