diff --git a/apps/dokploy/components/dashboard/application/advanced/volumes/add-volumes.tsx b/apps/dokploy/components/dashboard/application/advanced/volumes/add-volumes.tsx index 2bfd6bbc0..3830dd839 100644 --- a/apps/dokploy/components/dashboard/application/advanced/volumes/add-volumes.tsx +++ b/apps/dokploy/components/dashboard/application/advanced/volumes/add-volumes.tsx @@ -1,5 +1,5 @@ import { zodResolver } from "@hookform/resolvers/zod"; -import { PlusIcon } from "lucide-react"; +import { Eye, EyeOff, Loader2, PlusIcon, Server } from "lucide-react"; import type React from "react"; import { useEffect, useState } from "react"; import { useForm } from "react-hook-form"; @@ -7,7 +7,9 @@ import { toast } from "sonner"; import { z } from "zod"; import { AlertBlock } from "@/components/shared/alert-block"; import { CodeEditor } from "@/components/shared/code-editor"; +import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; +import { Checkbox } from "@/components/ui/checkbox"; import { Dialog, DialogContent, @@ -19,6 +21,7 @@ import { import { Form, FormControl, + FormDescription, FormField, FormItem, FormLabel, @@ -27,6 +30,9 @@ import { import { Input } from "@/components/ui/input"; import { Label } from "@/components/ui/label"; import { RadioGroup, RadioGroupItem } from "@/components/ui/radio-group"; +import { ScrollArea } from "@/components/ui/scroll-area"; +import { Separator } from "@/components/ui/separator"; +import { Switch } from "@/components/ui/switch"; import { cn } from "@/lib/utils"; import { api } from "@/utils/api"; @@ -41,6 +47,7 @@ interface Props { | "mysql" | "mariadb" | "compose"; + serverId?: string; refetch: () => void; children?: React.ReactNode; } @@ -75,6 +82,35 @@ const mySchema = z.discriminatedUnion("type", [ content: z.string().optional(), }) .merge(mountSchema), + z + .object({ + type: z.literal("nfs"), + nfsServer: z.string().min(1, "NFS server address required"), + nfsPath: z.string().min(1, "NFS export path required"), + mountOptions: z.string().optional(), + username: z.string().optional(), + password: z.string().optional(), + mountMethod: z + .enum(["docker-volume", "host-mount"]) + .default("host-mount"), + replicateToSwarm: z.boolean().default(false), + targetNodes: z.array(z.string()).optional(), + }) + .merge(mountSchema), + z + .object({ + type: z.literal("smb"), + smbServer: z.string().min(1, "SMB server address required"), + smbShare: z.string().min(1, "SMB share name required"), + smbPath: z.string().optional(), + mountOptions: z.string().optional(), + username: z.string().min(1, "Username required for SMB"), + password: z.string().min(1, "Password required for SMB"), + domain: z.string().optional(), + replicateToSwarm: z.boolean().default(false), + targetNodes: z.array(z.string()).optional(), + }) + .merge(mountSchema), ]); type AddMount = z.infer; @@ -82,20 +118,35 @@ type AddMount = z.infer; export const AddVolumes = ({ serviceId, serviceType, + serverId, refetch, children = , }: Props) => { const [isOpen, setIsOpen] = useState(false); + const [showPassword, setShowPassword] = useState(false); const { mutateAsync } = api.mounts.create.useMutation(); const form = useForm({ defaultValues: { type: serviceType === "compose" ? "file" : "bind", hostPath: "", mountPath: serviceType === "compose" ? 
"/" : "", + mountMethod: "host-mount", + replicateToSwarm: false, + targetNodes: [], }, resolver: zodResolver(mySchema), }); const type = form.watch("type"); + const mountMethod = form.watch("mountMethod"); + const replicateToSwarm = form.watch("replicateToSwarm"); + const targetNodes = form.watch("targetNodes") || []; + + // Fetch available swarm nodes + const { data: availableNodes, isLoading: nodesLoading } = + api.mounts.getAvailableNodes.useQuery( + { serverId }, + { enabled: (type === "nfs" || type === "smb") && !!serverId }, + ); useEffect(() => { form.reset(); @@ -148,6 +199,59 @@ export const AddVolumes = ({ .catch(() => { toast.error("Error creating the File mount"); }); + } else if (data.type === "nfs") { + await mutateAsync({ + serviceId, + nfsServer: data.nfsServer, + nfsPath: data.nfsPath, + mountPath: data.mountPath, + mountOptions: data.mountOptions, + mountMethod: data.mountMethod || "host-mount", + type: data.type, + serviceType, + username: data.username, + password: data.password, + replicateToSwarm: data.replicateToSwarm || false, + targetNodes: data.replicateToSwarm ? data.targetNodes : undefined, + }) + .then(() => { + toast.success("NFS Mount Created"); + setIsOpen(false); + }) + .catch((err) => { + toast.error( + `Error creating the NFS mount: ${ + err instanceof Error ? err.message : "Unknown error" + }`, + ); + }); + } else if (data.type === "smb") { + await mutateAsync({ + serviceId, + smbServer: data.smbServer, + smbShare: data.smbShare, + smbPath: data.smbPath, + mountPath: data.mountPath, + mountOptions: data.mountOptions, + type: data.type, + serviceType, + username: data.username, + password: data.password, + domain: data.domain, + replicateToSwarm: data.replicateToSwarm || false, + targetNodes: data.replicateToSwarm ? data.targetNodes : undefined, + }) + .then(() => { + toast.success("SMB Mount Created"); + setIsOpen(false); + }) + .catch((err) => { + toast.error( + `Error creating the SMB mount: ${ + err instanceof Error ? err.message : "Unknown error" + }`, + ); + }); } refetch(); @@ -249,6 +353,44 @@ export const AddVolumes = ({ )} + {serviceType !== "compose" && ( + + +
+ + +
+
+
+ )} + {serviceType !== "compose" && ( + + +
+ + +
+
+
+ )} )} + {(type === "nfs" || type === "smb") && ( + <> + {type === "nfs" && ( + <> + ( + + Mount Method + + Choose how to mount the NFS share + + + + + + + + +
+ + Docker Native Volume + + + Simpler, Docker-managed lifecycle. + Recommended for most NFS mounts. + +
+
+
+ + + + + +
+ + Host-Level Mount + + + Full control, mount on host then bind + into container. Use for advanced + configurations. + +
+
+
+
+
+ +
+ )} + /> + + ( + + NFS Server + + + + + IP address or hostname of the NFS server + + + + )} + /> + ( + + NFS Export Path + + + + + Path to the NFS export on the server + + + + )} + /> + + )} + {type === "smb" && ( + <> + ( + + SMB Server + + + + + IP address or hostname of the SMB server + + + + )} + /> + ( + + SMB Share Name + + + + + Name of the SMB share + + + + )} + /> + ( + + SMB Subdirectory (Optional) + + + + + Optional subdirectory within the share + + + + )} + /> + + )} + ( + + Mount Options (Optional) + + + + + Additional mount options (e.g., vers=4.0,soft) + + + + )} + /> + {(type === "nfs" || type === "smb") && ( + <> + + ( + + + Username + {type === "smb" ? " (Required)" : " (Optional)"} + + + + + + {type === "smb" + ? "Username for SMB authentication" + : "Username for NFS authentication (if required)"} + + + + )} + /> + ( + + + Password + {type === "smb" ? " (Required)" : " (Optional)"} + + +
+ + +
+
+ + {type === "smb" + ? "Password for SMB authentication" + : "Password for NFS authentication (if required)"} + + +
+ )} + /> + {type === "smb" && ( + ( + + Domain (Optional) + + + + + Windows domain for SMB authentication + + + + )} + /> + )} + + )} + + ( + +
+ + Replicate to Swarm Nodes + + + {mountMethod === "docker-volume" + ? "Create Docker volume on selected Swarm nodes" + : "Distribute this mount to selected Docker Swarm nodes"} + +
+ + + +
+ )} + /> + {replicateToSwarm && ( + ( + +
+ + Select Swarm Nodes + + + Choose which nodes should have this mount. At + least one node must be selected. + +
+ {nodesLoading ? ( +
+ +
+ ) : availableNodes && availableNodes.length > 0 ? ( + +
+ {availableNodes.map((node) => ( + { + return ( + + + { + return checked + ? field.onChange([ + ...(field.value || []), + node.nodeId, + ]) + : field.onChange( + field.value?.filter( + (value) => + value !== + node.nodeId, + ) || [], + ); + }} + /> + +
+ + {node.hostname} + + {node.role} + + {node.availability === + "active" && ( + + Active + + )} + + + {node.ip} • {node.status} + {node.labels && + Object.keys(node.labels) + .length > 0 && ( + + {" "} + •{" "} + {Object.entries( + node.labels, + ) + .map( + ([key, value]) => + `${key}=${value}`, + ) + .join(", ")} + + )} + +
+
+ ); + }} + /> + ))} +
+
+ ) : ( + +

+ No swarm nodes available. Make sure Docker + Swarm is initialized and nodes are accessible. +

+
+ )} + +
+ )} + /> + )} + + )} {serviceType !== "compose" && ( { {data && data?.mounts.length > 0 && ( - + Add Volume )} @@ -63,7 +68,12 @@ export const ShowVolumes = ({ id, type }: Props) => { No volumes/mounts configured - + Add Volume @@ -113,6 +123,55 @@ export const ShowVolumes = ({ id, type }: Props) => { )} + {mount.type === "nfs" && ( + <> +
+ NFS Server + + {mount.nfsServer} + +
+
+ NFS Path + + {mount.nfsPath} + +
+ + )} + {mount.type === "smb" && ( + <> +
+ SMB Server + + {mount.smbServer} + +
+
+ SMB Share + + {mount.smbShare} + +
+ {mount.smbPath && ( +
+ SMB Path + + {mount.smbPath} + +
+ )} + + )} + {(mount.type === "nfs" || mount.type === "smb") && + mount.replicateToSwarm && ( +
+ Swarm Replication + + {mount.targetNodes?.length || 0} node(s) + +
+ )} {mount.type === "file" && (
File Path diff --git a/apps/dokploy/server/api/routers/mount.ts b/apps/dokploy/server/api/routers/mount.ts index 814d3d392..3aae14a7f 100644 --- a/apps/dokploy/server/api/routers/mount.ts +++ b/apps/dokploy/server/api/routers/mount.ts @@ -6,6 +6,15 @@ import { findMountOrganizationId, getServiceContainer, updateMount, + getSwarmNodes, + findServerById, + distributeCredentialsToNodes, + syncMountToAllNodes, + verifyMountsOnNodes, + cleanupMountFromNodes, + testNodeConnectivity, + getSwarmNodesForMount, + getServerId, } from "@dokploy/server"; import { TRPCError } from "@trpc/server"; import { z } from "zod"; @@ -16,6 +25,9 @@ import { apiUpdateMount, } from "@/server/db/schema"; import { createTRPCRouter, protectedProcedure } from "../trpc"; +import { db } from "@/server/db"; +import { mountNodeStatus } from "@dokploy/server/db/schema/mount-node-status"; +import { eq, and } from "drizzle-orm"; export const mountRouter = createTRPCRouter({ create: protectedProcedure @@ -71,4 +83,198 @@ export const mountRouter = createTRPCRouter({ ); return mounts; }), + getAvailableNodes: protectedProcedure + .input( + z.object({ + serverId: z.string().optional(), + }), + ) + .query(async ({ input, ctx }) => { + if (input.serverId) { + const server = await findServerById(input.serverId); + if (server.organizationId !== ctx.session.activeOrganizationId) { + throw new TRPCError({ code: "UNAUTHORIZED" }); + } + } + const nodes = await getSwarmNodes(input.serverId || undefined); + return ( + nodes?.map((node) => ({ + nodeId: node.ID, + hostname: node.Description?.Hostname || node.ID, + ip: node.Status?.Addr || "", + role: node.Spec.Role, + status: node.Status?.State || "unknown", + availability: node.Spec.Availability, + labels: node.Spec.Labels || {}, + })) || [] + ); + }), + testNodeConnectivity: protectedProcedure + .input( + z.object({ + nodeId: z.string().min(1), + serverId: z.string().optional(), + nfsServer: z.string().optional(), + smbServer: z.string().optional(), + }), + ) + .mutation(async ({ input, ctx }) => { + if (input.serverId) { + const server = await findServerById(input.serverId); + if (server.organizationId !== ctx.session.activeOrganizationId) { + throw new TRPCError({ code: "UNAUTHORIZED" }); + } + } + return await testNodeConnectivity( + input.nodeId, + input.nfsServer, + input.smbServer, + input.serverId || undefined, + ); + }), + syncMountToSwarm: protectedProcedure + .input( + z.object({ + mountId: z.string().min(1), + nodeIds: z.array(z.string()).min(1), + }), + ) + .mutation(async ({ input, ctx }) => { + const organizationId = await findMountOrganizationId(input.mountId); + if (organizationId !== ctx.session.activeOrganizationId) { + throw new TRPCError({ + code: "UNAUTHORIZED", + message: "You are not authorized to sync this mount", + }); + } + + const mount = await findMountById(input.mountId); + const serverId = await getServerId(mount); + + // Distribute credentials if needed + if (mount.credentialsId) { + await distributeCredentialsToNodes( + mount, + input.nodeIds, + serverId, + ); + } + + // Sync mounts to nodes + const results = await syncMountToAllNodes( + mount, + input.nodeIds, + serverId, + ); + + return Array.from(results.entries()).map(([nodeId, result]) => ({ + nodeId, + ...result, + })); + }), + getMountNodeStatus: protectedProcedure + .input(apiFindOneMount) + .query(async ({ input, ctx }) => { + const organizationId = await findMountOrganizationId(input.mountId); + if (organizationId !== ctx.session.activeOrganizationId) { + throw new TRPCError({ + code: 
"UNAUTHORIZED", + message: "You are not authorized to access this mount", + }); + } + + const statuses = await db.query.mountNodeStatus.findMany({ + where: eq(mountNodeStatus.mountId, input.mountId), + }); + + return statuses; + }), + verifyMountsOnNodes: protectedProcedure + .input( + z.object({ + mountId: z.string().min(1), + nodeIds: z.array(z.string()).min(1).optional(), + }), + ) + .mutation(async ({ input, ctx }) => { + const organizationId = await findMountOrganizationId(input.mountId); + if (organizationId !== ctx.session.activeOrganizationId) { + throw new TRPCError({ + code: "UNAUTHORIZED", + message: "You are not authorized to verify this mount", + }); + } + + const mount = await findMountById(input.mountId); + const serverId = await getServerId(mount); + + const nodeIds = + input.nodeIds || mount.targetNodes || []; + + if (nodeIds.length === 0) { + return []; + } + + const results = await verifyMountsOnNodes( + input.mountId, + nodeIds, + serverId, + ); + + return Array.from(results.entries()).map(([nodeId, result]) => ({ + nodeId, + ...result, + })); + }), + updateMountNodes: protectedProcedure + .input( + z.object({ + mountId: z.string().min(1), + nodeIds: z.array(z.string()).min(1), + }), + ) + .mutation(async ({ input, ctx }) => { + const organizationId = await findMountOrganizationId(input.mountId); + if (organizationId !== ctx.session.activeOrganizationId) { + throw new TRPCError({ + code: "UNAUTHORIZED", + message: "You are not authorized to update this mount", + }); + } + + const mount = await findMountById(input.mountId); + const serverId = await getServerId(mount); + + // Get current target nodes + const currentNodes = mount.targetNodes || []; + const newNodes = input.nodeIds; + + // Find nodes to add and remove + const nodesToAdd = newNodes.filter((n) => !currentNodes.includes(n)); + const nodesToRemove = currentNodes.filter((n) => !newNodes.includes(n)); + + // Remove mounts from nodes that are no longer targeted + if (nodesToRemove.length > 0) { + await cleanupMountFromNodes(mount, nodesToRemove, serverId); + } + + // Add mounts to new nodes + if (nodesToAdd.length > 0) { + if (mount.credentialsId) { + await distributeCredentialsToNodes( + mount, + nodesToAdd, + serverId, + ); + } + await syncMountToAllNodes(mount, nodesToAdd, serverId); + } + + // Update mount with new target nodes + await updateMount(input.mountId, { + targetNodes: newNodes, + }); + + return true; + }), }); diff --git a/packages/server/src/db/schema/index.ts b/packages/server/src/db/schema/index.ts index c16ef1452..a8bc7b42f 100644 --- a/packages/server/src/db/schema/index.ts +++ b/packages/server/src/db/schema/index.ts @@ -16,6 +16,8 @@ export * from "./gitlab"; export * from "./mariadb"; export * from "./mongo"; export * from "./mount"; +export * from "./mount-credentials"; +export * from "./mount-node-status"; export * from "./mysql"; export * from "./notification"; export * from "./port"; diff --git a/packages/server/src/db/schema/mount-credentials.ts b/packages/server/src/db/schema/mount-credentials.ts new file mode 100644 index 000000000..734de479f --- /dev/null +++ b/packages/server/src/db/schema/mount-credentials.ts @@ -0,0 +1,55 @@ +import { relations } from "drizzle-orm"; +import { pgTable, text } from "drizzle-orm/pg-core"; +import { createInsertSchema } from "drizzle-zod"; +import { nanoid } from "nanoid"; +import { z } from "zod"; +import { mounts } from "./mount"; + +export const mountCredentials = pgTable("mount_credentials", { + credentialsId: text("credentialsId") + .notNull() + 
.primaryKey() + .$defaultFn(() => nanoid()), + mountId: text("mountId") + .notNull() + .references(() => mounts.mountId, { onDelete: "cascade" }), + username: text("username").notNull(), // Encrypted + password: text("password").notNull(), // Encrypted + domain: text("domain"), // For SMB + createdAt: text("createdAt") + .notNull() + .$defaultFn(() => new Date().toISOString()), + updatedAt: text("updatedAt") + .notNull() + .$defaultFn(() => new Date().toISOString()), +}); + +export const mountCredentialsRelations = relations( + mountCredentials, + ({ one }) => ({ + mount: one(mounts, { + fields: [mountCredentials.mountId], + references: [mounts.mountId], + }), + }), +); + +export const apiCreateMountCredentials = createInsertSchema( + mountCredentials, + { + credentialsId: z.string().optional(), + mountId: z.string().min(1), + username: z.string().min(1), + password: z.string().min(1), + domain: z.string().optional(), + createdAt: z.string().optional(), + updatedAt: z.string().optional(), + }, +); + +export const apiUpdateMountCredentials = apiCreateMountCredentials + .partial() + .extend({ + credentialsId: z.string().min(1), + }); + diff --git a/packages/server/src/db/schema/mount-node-status.ts b/packages/server/src/db/schema/mount-node-status.ts new file mode 100644 index 000000000..788efb11b --- /dev/null +++ b/packages/server/src/db/schema/mount-node-status.ts @@ -0,0 +1,77 @@ +import { relations } from "drizzle-orm"; +import { pgEnum, pgTable, text, timestamp } from "drizzle-orm/pg-core"; +import { createInsertSchema } from "drizzle-zod"; +import { nanoid } from "nanoid"; +import { z } from "zod"; +import { mounts } from "./mount"; +import { server } from "./server"; + +export const mountNodeStatusEnum = pgEnum("mountNodeStatus", [ + "pending", + "mounted", + "failed", + "unmounted", +]); + +export const mountNodeStatus = pgTable("mount_node_status", { + mountNodeStatusId: text("mountNodeStatusId") + .notNull() + .primaryKey() + .$defaultFn(() => nanoid()), + mountId: text("mountId") + .notNull() + .references(() => mounts.mountId, { onDelete: "cascade" }), + nodeId: text("nodeId").notNull(), // Docker Swarm node ID + nodeHostname: text("nodeHostname"), // Node hostname/IP for reference + serverId: text("serverId").references(() => server.serverId, { + onDelete: "set null", + }), + mountStatus: mountNodeStatusEnum("mountStatus") + .notNull() + .default("pending"), + lastVerified: timestamp("lastVerified"), + errorMessage: text("errorMessage"), + createdAt: text("createdAt") + .notNull() + .$defaultFn(() => new Date().toISOString()), + updatedAt: text("updatedAt") + .notNull() + .$defaultFn(() => new Date().toISOString()), +}); + +export const mountNodeStatusRelations = relations( + mountNodeStatus, + ({ one }) => ({ + mount: one(mounts, { + fields: [mountNodeStatus.mountId], + references: [mounts.mountId], + }), + server: one(server, { + fields: [mountNodeStatus.serverId], + references: [server.serverId], + }), + }), +); + +export const apiCreateMountNodeStatus = createInsertSchema( + mountNodeStatus, + { + mountNodeStatusId: z.string().optional(), + mountId: z.string().min(1), + nodeId: z.string().min(1), + nodeHostname: z.string().optional(), + serverId: z.string().optional(), + mountStatus: z.enum(["pending", "mounted", "failed", "unmounted"]), + lastVerified: z.date().optional(), + errorMessage: z.string().optional(), + createdAt: z.string().optional(), + updatedAt: z.string().optional(), + }, +); + +export const apiUpdateMountNodeStatus = apiCreateMountNodeStatus + .partial() + 
.extend({ + mountNodeStatusId: z.string().min(1), + }); + diff --git a/packages/server/src/db/schema/mount.ts b/packages/server/src/db/schema/mount.ts index 299f39caf..b93295896 100644 --- a/packages/server/src/db/schema/mount.ts +++ b/packages/server/src/db/schema/mount.ts @@ -1,5 +1,5 @@ import { relations } from "drizzle-orm"; -import { pgEnum, pgTable, text } from "drizzle-orm/pg-core"; +import { boolean, jsonb, pgEnum, pgTable, text } from "drizzle-orm/pg-core"; import { createInsertSchema } from "drizzle-zod"; import { nanoid } from "nanoid"; import { z } from "zod"; @@ -10,6 +10,8 @@ import { mongo } from "./mongo"; import { mysql } from "./mysql"; import { postgres } from "./postgres"; import { redis } from "./redis"; +import { mountCredentials } from "./mount-credentials"; +import { mountNodeStatus } from "./mount-node-status"; export const serviceType = pgEnum("serviceType", [ "application", @@ -21,7 +23,18 @@ export const serviceType = pgEnum("serviceType", [ "compose", ]); -export const mountType = pgEnum("mountType", ["bind", "volume", "file"]); +export const mountType = pgEnum("mountType", [ + "bind", + "volume", + "file", + "nfs", + "smb", +]); + +export const mountMethod = pgEnum("mountMethod", [ + "docker-volume", + "host-mount", +]); export const mounts = pgTable("mount", { mountId: text("mountId") @@ -35,6 +48,28 @@ export const mounts = pgTable("mount", { content: text("content"), serviceType: serviceType("serviceType").notNull().default("application"), mountPath: text("mountPath").notNull(), + // NFS fields + nfsServer: text("nfsServer"), + nfsPath: text("nfsPath"), + // SMB fields + smbServer: text("smbServer"), + smbShare: text("smbShare"), + smbPath: text("smbPath"), + // Common network mount fields + mountOptions: text("mountOptions"), + credentialsId: text("credentialsId"), + replicateToSwarm: boolean("replicateToSwarm").notNull().default(false), + targetNodes: text("targetNodes").array(), + mountPathOnHost: text("mountPathOnHost"), + mountMethod: mountMethod("mountMethod").notNull().default("host-mount"), + dockerVolumeName: text("dockerVolumeName"), // For tracking Docker volumes + nodeSpecificConfig: jsonb("nodeSpecificConfig").$type>(), applicationId: text("applicationId").references( () => applications.applicationId, { onDelete: "cascade" }, @@ -59,7 +94,7 @@ export const mounts = pgTable("mount", { }), }); -export const MountssRelations = relations(mounts, ({ one }) => ({ +export const MountssRelations = relations(mounts, ({ one, many }) => ({ application: one(applications, { fields: [mounts.applicationId], references: [applications.applicationId], @@ -88,17 +123,43 @@ export const MountssRelations = relations(mounts, ({ one }) => ({ fields: [mounts.composeId], references: [compose.composeId], }), + credentials: one(mountCredentials, { + fields: [mounts.credentialsId], + references: [mountCredentials.credentialsId], + relationName: "mountCredentials", + }), + nodeStatuses: many(mountNodeStatus), })); const createSchema = createInsertSchema(mounts, { applicationId: z.string(), - type: z.enum(["bind", "volume", "file"]), + type: z.enum(["bind", "volume", "file", "nfs", "smb"]), hostPath: z.string().optional(), volumeName: z.string().optional(), content: z.string().optional(), mountPath: z.string().min(1), mountId: z.string().optional(), filePath: z.string().optional(), + nfsServer: z.string().optional(), + nfsPath: z.string().optional(), + smbServer: z.string().optional(), + smbShare: z.string().optional(), + smbPath: z.string().optional(), + mountOptions: 
z.string().optional(), + credentialsId: z.string().optional(), + replicateToSwarm: z.boolean().default(false), + targetNodes: z.array(z.string()).optional(), + mountPathOnHost: z.string().optional(), + mountMethod: z.enum(["docker-volume", "host-mount"]).default("host-mount"), + dockerVolumeName: z.string().optional(), + nodeSpecificConfig: z + .record( + z.object({ + mountPath: z.string().optional(), + mountOptions: z.string().optional(), + }), + ) + .optional(), serviceType: z .enum([ "application", @@ -125,10 +186,67 @@ export const apiCreateMount = createSchema mountPath: true, serviceType: true, filePath: true, + nfsServer: true, + nfsPath: true, + smbServer: true, + smbShare: true, + smbPath: true, + mountOptions: true, + replicateToSwarm: true, + targetNodes: true, + mountPathOnHost: true, + mountMethod: true, + nodeSpecificConfig: true, }) .extend({ serviceId: z.string().min(1), - }); + // Credentials for NFS/SMB (optional, plaintext - will be encrypted) + username: z.string().optional(), + password: z.string().optional(), + domain: z.string().optional(), // For SMB + }) + .refine( + (data) => { + if (data.type === "nfs") { + return !!data.nfsServer && !!data.nfsPath; + } + if (data.type === "smb") { + return !!data.smbServer && !!data.smbShare; + } + return true; + }, + { + message: + "NFS mounts require nfsServer and nfsPath. SMB mounts require smbServer and smbShare.", + }, + ) + .refine( + (data) => { + if (data.replicateToSwarm) { + return ( + Array.isArray(data.targetNodes) && data.targetNodes.length > 0 + ); + } + return true; + }, + { + message: + "targetNodes must be specified when replicateToSwarm is true", + }, + ) + .refine( + (data) => { + // SMB can only use host-mount method (no native Docker support) + if (data.type === "smb" && data.mountMethod === "docker-volume") { + return false; + } + return true; + }, + { + message: + "SMB mounts can only use host-mount method. 
Docker native volumes are not supported for SMB.", + }, + ); export const apiFindOneMount = createSchema .pick({ diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index e6d753293..c3591c2ed 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -91,6 +91,11 @@ export * from "./utils/docker/utils"; export * from "./utils/filesystem/directory"; export * from "./utils/filesystem/ssh"; export * from "./utils/gpu-setup"; +export * from "./utils/mounts/host-mount"; +export * from "./utils/mounts/swarm-replication"; +export * from "./utils/mounts/node-mount-manager"; +export * from "./utils/mounts/docker-volumes"; +export * from "./utils/encryption/mount-credentials"; export * from "./utils/notifications/build-error"; export * from "./utils/notifications/build-success"; export * from "./utils/notifications/database-backup"; @@ -123,4 +128,9 @@ export * from "./utils/traefik/types"; export * from "./utils/traefik/web-server"; export * from "./utils/volume-backups/index"; export * from "./utils/watch-paths/should-deploy"; +export * from "./utils/errors/structured-errors"; +export * from "./utils/errors/retry"; +export * from "./utils/security/validation"; +export * from "./utils/cache/simple-cache"; +export * from "./utils/cache/query-cache"; export * from "./wss/utils"; diff --git a/packages/server/src/services/mount.ts b/packages/server/src/services/mount.ts index f08a32312..d2d6bbf0c 100644 --- a/packages/server/src/services/mount.ts +++ b/packages/server/src/services/mount.ts @@ -4,6 +4,7 @@ import { db } from "@dokploy/server/db"; import { type apiCreateMount, mounts, + mountCredentials, type ServiceType, } from "@dokploy/server/db/schema"; import { @@ -16,6 +17,26 @@ import { execAsync, execAsyncRemote, } from "@dokploy/server/utils/process/execAsync"; +import { encryptMountCredentials } from "@dokploy/server/utils/encryption/mount-credentials"; +import { + distributeCredentialsToNodes, + syncMountToAllNodes, +} from "@dokploy/server/utils/mounts/swarm-replication"; +import { + createNFSVolume, + removeNFSVolume, + syncDockerVolumesToNodes, +} from "@dokploy/server/utils/mounts/docker-volumes"; +import { + validateMountPath, + validateNFSServer, + validateSMBServer, + validateNFSPath, + validateSMBShare, + validateSMBPath, + sanitizeUsername, + validateMountOptions, +} from "@dokploy/server/utils/security/validation"; import { TRPCError } from "@trpc/server"; import { eq, type SQL, sql } from "drizzle-orm"; @@ -23,11 +44,90 @@ export type Mount = typeof mounts.$inferSelect; export const createMount = async (input: typeof apiCreateMount._type) => { try { - const { serviceId, ...rest } = input; + const { serviceId, username, password, domain, ...rest } = input; + + // Validate mount path + validateMountPath(input.mountPath); + + // Validate NFS/SMB specific fields + if (input.type === "nfs") { + if (input.nfsServer) { + validateNFSServer(input.nfsServer); + } + if (input.nfsPath) { + validateNFSPath(input.nfsPath); + } + } + + if (input.type === "smb") { + if (input.smbServer) { + validateSMBServer(input.smbServer); + } + if (input.smbShare) { + validateSMBShare(input.smbShare); + } + if (input.smbPath) { + validateSMBPath(input.smbPath); + } + } + + // Validate and sanitize credentials + let sanitizedUsername: string | undefined; + if (username) { + sanitizedUsername = sanitizeUsername(username); + } + + // Validate mount options + if (input.mountOptions) { + validateMountOptions(input.mountOptions); + } + + // Handle credentials for NFS/SMB 
mounts + let credentialsId: string | undefined; + if ( + (input.type === "nfs" || input.type === "smb") && + sanitizedUsername && + password + ) { + // Encrypt credentials + const encrypted = await encryptMountCredentials({ + username: sanitizedUsername, + password, + domain: domain ? sanitizeUsername(domain) : undefined, + }); + + // Create credentials record (will be linked after mount is created) + const credentials = await db + .insert(mountCredentials) + .values({ + username: encrypted.username, + password: encrypted.password, + domain: encrypted.domain, + mountId: "", // Will be updated after mount is created + }) + .returning() + .then((value) => value[0]); + + if (!credentials) { + throw new TRPCError({ + code: "BAD_REQUEST", + message: "Error creating credentials", + }); + } + + credentialsId = credentials.credentialsId; + } + + // mountPathOnHost will be set after mount creation using the mountId + const value = await db .insert(mounts) .values({ ...rest, + credentialsId, + ...(input.mountPathOnHost && { + mountPathOnHost: input.mountPathOnHost, + }), ...(input.serviceType === "application" && { applicationId: serviceId, }), @@ -60,9 +160,103 @@ export const createMount = async (input: typeof apiCreateMount._type) => { }); } - if (value.type === "file") { - await createFileMount(value.mountId); + // Update credentials with mountId if credentials were created + if (credentialsId) { + await db + .update(mountCredentials) + .set({ mountId: value.mountId }) + .where(eq(mountCredentials.credentialsId, credentialsId)); + } + + // Determine mount method (default to host-mount for backward compatibility) + const mountMethod = value.mountMethod || "host-mount"; + + // Handle Docker volume creation for NFS + if ( + value.type === "nfs" && + mountMethod === "docker-volume" && + value.nfsServer && + value.nfsPath + ) { + // Generate Docker volume name + const dockerVolumeName = `dokploy-nfs-${value.mountId}`; + + // Get serverId for the service + const serverId = await getServerId(await findMountById(value.mountId)); + + // Create Docker volume + await createNFSVolume({ + volumeName: dockerVolumeName, + nfsServer: value.nfsServer, + nfsPath: value.nfsPath, + mountOptions: value.mountOptions || undefined, + serverId, + }); + + // Update mount with Docker volume name + await db + .update(mounts) + .set({ dockerVolumeName }) + .where(eq(mounts.mountId, value.mountId)); + value.dockerVolumeName = dockerVolumeName; + + // Sync Docker volumes to selected nodes if swarm replication is enabled + if ( + value.replicateToSwarm && + value.targetNodes && + value.targetNodes.length > 0 + ) { + await syncDockerVolumesToNodes( + dockerVolumeName, + value.nfsServer, + value.nfsPath, + value.mountOptions || undefined, + value.targetNodes, + serverId, + ); + } + } else { + // Set mountPathOnHost if not provided and this is a host-level network mount + if ( + !value.mountPathOnHost && + (value.type === "nfs" || value.type === "smb") && + mountMethod === "host-mount" + ) { + const defaultMountPath = `/mnt/dokploy-${value.type}-${value.mountId}`; + await db + .update(mounts) + .set({ mountPathOnHost: defaultMountPath }) + .where(eq(mounts.mountId, value.mountId)); + value.mountPathOnHost = defaultMountPath; + } + + // Handle different mount types + if (value.type === "file") { + await createFileMount(value.mountId); + } else if ( + (value.type === "nfs" || value.type === "smb") && + mountMethod === "host-mount" && + value.replicateToSwarm && + value.targetNodes && + value.targetNodes.length > 0 + ) { + // Get 
serverId for the service + const serverId = await getServerId(await findMountById(value.mountId)); + + // Distribute credentials to nodes + if (credentialsId) { + await distributeCredentialsToNodes( + value, + value.targetNodes, + serverId, + ); + } + + // Sync mounts to all target nodes + await syncMountToAllNodes(value, value.targetNodes, serverId); + } } + return value; } catch (error) { console.log(error); @@ -273,10 +467,30 @@ export const findMountsByApplicationId = async ( }; export const deleteMount = async (mountId: string) => { - const { type } = await findMountById(mountId); + const mount = await findMountById(mountId); + const { type } = mount; + const mountMethod = mount.mountMethod || "host-mount"; if (type === "file") { await deleteFileMount(mountId); + } else if (type === "nfs" || type === "smb") { + if (mountMethod === "docker-volume" && mount.dockerVolumeName) { + // Remove Docker volume + const serverId = await getServerId(mount); + await removeNFSVolume(mount.dockerVolumeName, serverId); + } else if ( + mountMethod === "host-mount" && + mount.replicateToSwarm && + mount.targetNodes && + mount.targetNodes.length > 0 + ) { + // Cleanup network mounts from swarm nodes + const { cleanupMountFromNodes } = await import( + "../utils/mounts/swarm-replication" + ); + const serverId = await getServerId(mount); + await cleanupMountFromNodes(mount, mount.targetNodes, serverId); + } } const deletedMount = await db diff --git a/packages/server/src/services/rollbacks.ts b/packages/server/src/services/rollbacks.ts index 2e9da4ee1..40c593977 100644 --- a/packages/server/src/services/rollbacks.ts +++ b/packages/server/src/services/rollbacks.ts @@ -13,6 +13,7 @@ import { calculateResources, generateBindMounts, generateConfigContainer, + generateNetworkMounts, generateVolumeMounts, prepareEnvironmentVariables, } from "../utils/docker/utils"; @@ -210,6 +211,7 @@ const rollbackApplication = async ( }); const volumesMount = generateVolumeMounts(mounts); + const networkMounts = generateNetworkMounts(mounts); const { HealthCheck, @@ -247,7 +249,11 @@ const rollbackApplication = async ( HealthCheck, Image: rollbackImage, Env: envVariables, - Mounts: [...volumesMount, ...bindsMount], + Mounts: [ + ...volumesMount, + ...bindsMount, + ...networkMounts, + ], ...(command ? 
{ Command: ["/bin/sh"], diff --git a/packages/server/src/utils/builders/index.ts b/packages/server/src/utils/builders/index.ts index be6fcbcf0..db0876659 100644 --- a/packages/server/src/utils/builders/index.ts +++ b/packages/server/src/utils/builders/index.ts @@ -6,6 +6,7 @@ import { generateBindMounts, generateConfigContainer, generateFileMounts, + generateNetworkMounts, generateVolumeMounts, prepareEnvironmentVariables, } from "../docker/utils"; @@ -113,6 +114,7 @@ export const mechanizeDockerContainer = async ( } = generateConfigContainer(application); const bindsMount = generateBindMounts(mounts); + const networkMounts = generateNetworkMounts(mounts); const filesMount = generateFileMounts(appName, application); const envVariables = prepareEnvironmentVariables( env, @@ -132,7 +134,12 @@ export const mechanizeDockerContainer = async ( HealthCheck, Image: image, Env: envVariables, - Mounts: [...volumesMount, ...bindsMount, ...filesMount], + Mounts: [ + ...volumesMount, + ...bindsMount, + ...networkMounts, + ...filesMount, + ], ...(StopGracePeriod !== null && StopGracePeriod !== undefined && { StopGracePeriod }), ...(command && { diff --git a/packages/server/src/utils/cache/query-cache.ts b/packages/server/src/utils/cache/query-cache.ts new file mode 100644 index 000000000..0998c51b7 --- /dev/null +++ b/packages/server/src/utils/cache/query-cache.ts @@ -0,0 +1,44 @@ +/** + * Query result caching utilities for database queries + */ + +import { nodeCache, serverCache, mountCache } from "./simple-cache"; + +/** + * Cache key generators + */ +export const getNodeCacheKey = (nodeId: string, serverId?: string): string => { + return `node:${nodeId}:${serverId || "local"}`; +}; + +export const getServerCacheKey = (serverId: string): string => { + return `server:${serverId}`; +}; + +export const getMountCacheKey = (mountId: string): string => { + return `mount:${mountId}`; +}; + +export const getSwarmNodesCacheKey = (serverId?: string): string => { + return `swarm:nodes:${serverId || "local"}`; +}; + +/** + * Invalidate cache entries + */ +export const invalidateNodeCache = (nodeId: string, serverId?: string): void => { + nodeCache.delete(getNodeCacheKey(nodeId, serverId)); +}; + +export const invalidateServerCache = (serverId: string): void => { + serverCache.delete(getServerCacheKey(serverId)); +}; + +export const invalidateMountCache = (mountId: string): void => { + mountCache.delete(getMountCacheKey(mountId)); +}; + +export const invalidateSwarmNodesCache = (serverId?: string): void => { + nodeCache.delete(getSwarmNodesCacheKey(serverId)); +}; + diff --git a/packages/server/src/utils/cache/simple-cache.ts b/packages/server/src/utils/cache/simple-cache.ts new file mode 100644 index 000000000..50ed4eb99 --- /dev/null +++ b/packages/server/src/utils/cache/simple-cache.ts @@ -0,0 +1,128 @@ +/** + * Simple in-memory cache with TTL support + */ + +interface CacheEntry { + value: T; + expiresAt: number; +} + +export class SimpleCache { + private cache = new Map>(); + private defaultTTL: number; + + constructor(defaultTTL: number = 60000) { + // Default TTL: 60 seconds + this.defaultTTL = defaultTTL; + } + + /** + * Get value from cache + */ + get(key: string): T | undefined { + const entry = this.cache.get(key); + + if (!entry) { + return undefined; + } + + // Check if expired + if (Date.now() > entry.expiresAt) { + this.cache.delete(key); + return undefined; + } + + return entry.value; + } + + /** + * Set value in cache + */ + set(key: string, value: T, ttl?: number): void { + const expiresAt = 
Date.now() + (ttl || this.defaultTTL); + this.cache.set(key, { value, expiresAt }); + } + + /** + * Delete value from cache + */ + delete(key: string): void { + this.cache.delete(key); + } + + /** + * Clear all cache entries + */ + clear(): void { + this.cache.clear(); + } + + /** + * Check if key exists and is not expired + */ + has(key: string): boolean { + const entry = this.cache.get(key); + if (!entry) { + return false; + } + + if (Date.now() > entry.expiresAt) { + this.cache.delete(key); + return false; + } + + return true; + } + + /** + * Clean expired entries + */ + cleanExpired(): void { + const now = Date.now(); + for (const [key, entry] of this.cache.entries()) { + if (now > entry.expiresAt) { + this.cache.delete(key); + } + } + } + + /** + * Get cache size + */ + size(): number { + return this.cache.size; + } +} + +/** + * Global cache instances for different use cases + */ +export const mountCache = new SimpleCache(300000); // 5 minutes +export const nodeCache = new SimpleCache(60000); // 1 minute +export const serverCache = new SimpleCache(300000); // 5 minutes + +/** + * Cache decorator for async functions + */ +export const cached = Promise>( + fn: T, + keyGenerator: (...args: Parameters) => string, + ttl?: number, +): T => { + return (async (...args: Parameters) => { + const key = keyGenerator(...args); + const cache = new SimpleCache(ttl || 60000); + + // Try to get from cache + const cached = cache.get(key); + if (cached !== undefined) { + return cached; + } + + // Execute function and cache result + const result = await fn(...args); + cache.set(key, result, ttl); + return result; + }) as T; +}; + diff --git a/packages/server/src/utils/databases/mariadb.ts b/packages/server/src/utils/databases/mariadb.ts index a73dd8bb4..b79cc81a6 100644 --- a/packages/server/src/utils/databases/mariadb.ts +++ b/packages/server/src/utils/databases/mariadb.ts @@ -5,6 +5,7 @@ import { generateBindMounts, generateConfigContainer, generateFileMounts, + generateNetworkMounts, generateVolumeMounts, prepareEnvironmentVariables, } from "../docker/utils"; @@ -62,6 +63,7 @@ export const buildMariadb = async (mariadb: MariadbNested) => { ); const volumesMount = generateVolumeMounts(mounts); const bindsMount = generateBindMounts(mounts); + const networkMounts = generateNetworkMounts(mounts); const filesMount = generateFileMounts(appName, mariadb); const docker = await getRemoteDocker(mariadb.serverId); @@ -73,7 +75,12 @@ export const buildMariadb = async (mariadb: MariadbNested) => { HealthCheck, Image: dockerImage, Env: envVariables, - Mounts: [...volumesMount, ...bindsMount, ...filesMount], + Mounts: [ + ...volumesMount, + ...bindsMount, + ...networkMounts, + ...filesMount, + ], ...(StopGracePeriod !== null && StopGracePeriod !== undefined && { StopGracePeriod }), ...(command && { diff --git a/packages/server/src/utils/databases/mongo.ts b/packages/server/src/utils/databases/mongo.ts index 556878fe2..f440ffc1f 100644 --- a/packages/server/src/utils/databases/mongo.ts +++ b/packages/server/src/utils/databases/mongo.ts @@ -5,6 +5,7 @@ import { generateBindMounts, generateConfigContainer, generateFileMounts, + generateNetworkMounts, generateVolumeMounts, prepareEnvironmentVariables, } from "../docker/utils"; @@ -110,6 +111,7 @@ ${command ?? 
"wait $MONGOD_PID"}`; ); const volumesMount = generateVolumeMounts(mounts); const bindsMount = generateBindMounts(mounts); + const networkMounts = generateNetworkMounts(mounts); const filesMount = generateFileMounts(appName, mongo); const docker = await getRemoteDocker(mongo.serverId); @@ -121,7 +123,12 @@ ${command ?? "wait $MONGOD_PID"}`; HealthCheck, Image: dockerImage, Env: envVariables, - Mounts: [...volumesMount, ...bindsMount, ...filesMount], + Mounts: [ + ...volumesMount, + ...bindsMount, + ...networkMounts, + ...filesMount, + ], ...(StopGracePeriod !== null && StopGracePeriod !== undefined && { StopGracePeriod }), ...(replicaSets diff --git a/packages/server/src/utils/databases/mysql.ts b/packages/server/src/utils/databases/mysql.ts index 493931fdc..98a057781 100644 --- a/packages/server/src/utils/databases/mysql.ts +++ b/packages/server/src/utils/databases/mysql.ts @@ -5,6 +5,7 @@ import { generateBindMounts, generateConfigContainer, generateFileMounts, + generateNetworkMounts, generateVolumeMounts, prepareEnvironmentVariables, } from "../docker/utils"; @@ -68,6 +69,7 @@ export const buildMysql = async (mysql: MysqlNested) => { ); const volumesMount = generateVolumeMounts(mounts); const bindsMount = generateBindMounts(mounts); + const networkMounts = generateNetworkMounts(mounts); const filesMount = generateFileMounts(appName, mysql); const docker = await getRemoteDocker(mysql.serverId); @@ -79,7 +81,12 @@ export const buildMysql = async (mysql: MysqlNested) => { HealthCheck, Image: dockerImage, Env: envVariables, - Mounts: [...volumesMount, ...bindsMount, ...filesMount], + Mounts: [ + ...volumesMount, + ...bindsMount, + ...networkMounts, + ...filesMount, + ], ...(StopGracePeriod !== null && StopGracePeriod !== undefined && { StopGracePeriod }), ...(command && { diff --git a/packages/server/src/utils/databases/postgres.ts b/packages/server/src/utils/databases/postgres.ts index 7adb45367..9be3009ac 100644 --- a/packages/server/src/utils/databases/postgres.ts +++ b/packages/server/src/utils/databases/postgres.ts @@ -5,6 +5,7 @@ import { generateBindMounts, generateConfigContainer, generateFileMounts, + generateNetworkMounts, generateVolumeMounts, prepareEnvironmentVariables, } from "../docker/utils"; @@ -61,6 +62,7 @@ export const buildPostgres = async (postgres: PostgresNested) => { ); const volumesMount = generateVolumeMounts(mounts); const bindsMount = generateBindMounts(mounts); + const networkMounts = generateNetworkMounts(mounts); const filesMount = generateFileMounts(appName, postgres); const docker = await getRemoteDocker(postgres.serverId); @@ -72,7 +74,12 @@ export const buildPostgres = async (postgres: PostgresNested) => { HealthCheck, Image: dockerImage, Env: envVariables, - Mounts: [...volumesMount, ...bindsMount, ...filesMount], + Mounts: [ + ...volumesMount, + ...bindsMount, + ...networkMounts, + ...filesMount, + ], ...(StopGracePeriod !== null && StopGracePeriod !== undefined && { StopGracePeriod }), ...(command && { diff --git a/packages/server/src/utils/databases/redis.ts b/packages/server/src/utils/databases/redis.ts index 4c7701726..65e7326ff 100644 --- a/packages/server/src/utils/databases/redis.ts +++ b/packages/server/src/utils/databases/redis.ts @@ -5,6 +5,7 @@ import { generateBindMounts, generateConfigContainer, generateFileMounts, + generateNetworkMounts, generateVolumeMounts, prepareEnvironmentVariables, } from "../docker/utils"; @@ -59,6 +60,7 @@ export const buildRedis = async (redis: RedisNested) => { ); const volumesMount = 
generateVolumeMounts(mounts); const bindsMount = generateBindMounts(mounts); + const networkMounts = generateNetworkMounts(mounts); const filesMount = generateFileMounts(appName, redis); const docker = await getRemoteDocker(redis.serverId); @@ -70,7 +72,12 @@ export const buildRedis = async (redis: RedisNested) => { HealthCheck, Image: dockerImage, Env: envVariables, - Mounts: [...volumesMount, ...bindsMount, ...filesMount], + Mounts: [ + ...volumesMount, + ...bindsMount, + ...networkMounts, + ...filesMount, + ], ...(StopGracePeriod !== null && StopGracePeriod !== undefined && { StopGracePeriod }), ...(command || args diff --git a/packages/server/src/utils/docker/utils.ts b/packages/server/src/utils/docker/utils.ts index 045040061..41f929fd7 100644 --- a/packages/server/src/utils/docker/utils.ts +++ b/packages/server/src/utils/docker/utils.ts @@ -601,6 +601,44 @@ export const generateBindMounts = (mounts: ApplicationNested["mounts"]) => { })); }; +export const generateNetworkMounts = (mounts: ApplicationNested["mounts"]) => { + if (!mounts || mounts.length === 0) { + return []; + } + + return mounts + .filter((mount) => mount.type === "nfs" || mount.type === "smb") + .map((mount) => { + // Check mount method + const mountMethod = mount.mountMethod || "host-mount"; + + if (mountMethod === "docker-volume" && mount.type === "nfs") { + // Use Docker native volume + const volumeName = + mount.dockerVolumeName || + `dokploy-nfs-${mount.mountId}`; + + return { + Type: "volume" as const, + Source: volumeName, + Target: mount.mountPath, + }; + } + + // Host-level mount (default for SMB and NFS with host-mount method) + // Use mountPathOnHost if available, otherwise construct it + const sourcePath = + mount.mountPathOnHost || + `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + return { + Type: "bind" as const, + Source: sourcePath, + Target: mount.mountPath, + }; + }); +}; + export const generateFileMounts = ( appName: string, service: diff --git a/packages/server/src/utils/encryption/mount-credentials.ts b/packages/server/src/utils/encryption/mount-credentials.ts new file mode 100644 index 000000000..66c602fca --- /dev/null +++ b/packages/server/src/utils/encryption/mount-credentials.ts @@ -0,0 +1,152 @@ +import { createCipheriv, createDecipheriv, randomBytes, scrypt } from "node:crypto"; +import { promisify } from "node:util"; + +const scryptAsync = promisify(scrypt); + +// Encryption key should be stored in environment variable +// If not set, generate a key (not recommended for production) +const getEncryptionKey = async (): Promise => { + const keyFromEnv = process.env.MOUNT_CREDENTIALS_ENCRYPTION_KEY; + if (keyFromEnv) { + // If key is provided as hex string, convert it + if (keyFromEnv.length === 64) { + // 32 bytes = 64 hex characters + return Buffer.from(keyFromEnv, "hex"); + } + // Otherwise derive key from password using scrypt + const salt = Buffer.from("dokploy-mount-credentials-salt", "utf8"); + return (await scryptAsync(keyFromEnv, salt, 32)) as Buffer; + } + + // Fallback: generate a key (should only be used in development) + console.warn( + "WARNING: MOUNT_CREDENTIALS_ENCRYPTION_KEY not set. 
Using default key (not secure for production!)", + ); + const defaultKey = "dokploy-default-mount-credentials-key-change-in-production"; + const salt = Buffer.from("dokploy-mount-credentials-salt", "utf8"); + return (await scryptAsync(defaultKey, salt, 32)) as Buffer; +}; + +const ALGORITHM = "aes-256-gcm"; +const IV_LENGTH = 16; // 16 bytes for AES +const AUTH_TAG_LENGTH = 16; // 16 bytes for GCM authentication tag + +/** + * Encrypts a plaintext string using AES-256-GCM + * @param plaintext - The text to encrypt + * @returns Encrypted string in format: iv:authTag:encryptedData (all base64) + */ +export const encryptCredential = async ( + plaintext: string, +): Promise => { + if (!plaintext) { + return ""; + } + + const key = await getEncryptionKey(); + const iv = randomBytes(IV_LENGTH); + + const cipher = createCipheriv(ALGORITHM, key, iv); + cipher.setAAD(Buffer.from("dokploy-mount-credentials", "utf8")); + + let encrypted = cipher.update(plaintext, "utf8", "base64"); + encrypted += cipher.final("base64"); + + const authTag = cipher.getAuthTag(); + + // Format: iv:authTag:encryptedData (all base64) + return `${iv.toString("base64")}:${authTag.toString("base64")}:${encrypted}`; +}; + +/** + * Decrypts an encrypted string using AES-256-GCM + * @param encrypted - The encrypted string in format: iv:authTag:encryptedData + * @returns Decrypted plaintext string + */ +export const decryptCredential = async ( + encrypted: string, +): Promise => { + if (!encrypted) { + return ""; + } + + const key = await getEncryptionKey(); + + // Parse the encrypted string: iv:authTag:encryptedData + const parts = encrypted.split(":"); + if (parts.length !== 3) { + throw new Error("Invalid encrypted credential format"); + } + + const [ivBase64, authTagBase64, encryptedData] = parts; + const iv = Buffer.from(ivBase64, "base64"); + const authTag = Buffer.from(authTagBase64, "base64"); + + const decipher = createDecipheriv(ALGORITHM, key, iv); + decipher.setAAD(Buffer.from("dokploy-mount-credentials", "utf8")); + decipher.setAuthTag(authTag); + + let decrypted = decipher.update(encryptedData, "base64", "utf8"); + decrypted += decipher.final("utf8"); + + return decrypted; +}; + +/** + * Encrypts mount credentials (username and password) + * @param credentials - Object with username and password + * @returns Encrypted credentials object + */ +export const encryptMountCredentials = async (credentials: { + username: string; + password: string; + domain?: string; +}): Promise<{ + username: string; + password: string; + domain?: string; +}> => { + const [encryptedUsername, encryptedPassword, encryptedDomain] = + await Promise.all([ + encryptCredential(credentials.username), + encryptCredential(credentials.password), + credentials.domain ? 
encryptCredential(credentials.domain) : undefined, + ]); + + return { + username: encryptedUsername, + password: encryptedPassword, + ...(encryptedDomain && { domain: encryptedDomain }), + }; +}; + +/** + * Decrypts mount credentials (username and password) + * @param encryptedCredentials - Object with encrypted username and password + * @returns Decrypted credentials object + */ +export const decryptMountCredentials = async (encryptedCredentials: { + username: string; + password: string; + domain?: string; +}): Promise<{ + username: string; + password: string; + domain?: string; +}> => { + const [decryptedUsername, decryptedPassword, decryptedDomain] = + await Promise.all([ + decryptCredential(encryptedCredentials.username), + decryptCredential(encryptedCredentials.password), + encryptedCredentials.domain + ? decryptCredential(encryptedCredentials.domain) + : undefined, + ]); + + return { + username: decryptedUsername, + password: decryptedPassword, + ...(decryptedDomain && { domain: decryptedDomain }), + }; +}; + diff --git a/packages/server/src/utils/errors/retry.ts b/packages/server/src/utils/errors/retry.ts new file mode 100644 index 000000000..19fd22b92 --- /dev/null +++ b/packages/server/src/utils/errors/retry.ts @@ -0,0 +1,134 @@ +import { sleep } from "../process/execAsync"; +import type { StructuredError } from "./structured-errors"; + +export interface RetryOptions { + maxRetries?: number; + initialDelay?: number; + maxDelay?: number; + backoffMultiplier?: number; + onRetry?: (attempt: number, error: Error) => void; + retryCondition?: (error: Error) => boolean; +} + +const DEFAULT_OPTIONS: Required> = { + maxRetries: 3, + initialDelay: 1000, + maxDelay: 30000, + backoffMultiplier: 2, +}; + +/** + * Calculate delay for retry with exponential backoff + */ +const calculateDelay = ( + attempt: number, + initialDelay: number, + maxDelay: number, + backoffMultiplier: number, +): number => { + const delay = initialDelay * Math.pow(backoffMultiplier, attempt - 1); + return Math.min(delay, maxDelay); +}; + +/** + * Retry a function with exponential backoff + */ +export const retryWithBackoff = async ( + fn: () => Promise, + options: RetryOptions = {}, +): Promise => { + const { + maxRetries = DEFAULT_OPTIONS.maxRetries, + initialDelay = DEFAULT_OPTIONS.initialDelay, + maxDelay = DEFAULT_OPTIONS.maxDelay, + backoffMultiplier = DEFAULT_OPTIONS.backoffMultiplier, + onRetry, + retryCondition, + } = options; + + let lastError: Error | undefined; + + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + return await fn(); + } catch (error) { + lastError = error instanceof Error ? 
error : new Error(String(error)); + + // Check if error should be retried + if (retryCondition && !retryCondition(lastError)) { + throw lastError; + } + + // Check if this is a StructuredError and if it can be retried + if (lastError instanceof StructuredError) { + if (!lastError.canRetry(attempt)) { + throw lastError; + } + } + + // Don't retry on last attempt + if (attempt === maxRetries) { + throw lastError; + } + + // Calculate delay and wait + const delay = calculateDelay( + attempt, + initialDelay, + maxDelay, + backoffMultiplier, + ); + + if (onRetry) { + onRetry(attempt, lastError); + } + + await sleep(delay); + } + } + + // This should never be reached, but TypeScript needs it + throw lastError || new Error("Retry failed"); +}; + +/** + * Retry condition for network errors + */ +export const isNetworkError = (error: Error): boolean => { + const message = error.message.toLowerCase(); + return ( + message.includes("network") || + message.includes("timeout") || + message.includes("connection") || + message.includes("econnrefused") || + message.includes("enotfound") || + message.includes("econnreset") + ); +}; + +/** + * Retry condition for mount errors + */ +export const isMountError = (error: Error): boolean => { + return ( + error.message.toLowerCase().includes("mount") || + error.message.toLowerCase().includes("nfs") || + error.message.toLowerCase().includes("smb") || + error.message.toLowerCase().includes("cifs") + ); +}; + +/** + * Retry condition for temporary errors + */ +export const isTemporaryError = (error: Error): boolean => { + const message = error.message.toLowerCase(); + return ( + isNetworkError(error) || + message.includes("temporary") || + message.includes("retry") || + message.includes("busy") || + message.includes("resource temporarily unavailable") + ); +}; + diff --git a/packages/server/src/utils/errors/structured-errors.ts b/packages/server/src/utils/errors/structured-errors.ts new file mode 100644 index 000000000..5a371cb78 --- /dev/null +++ b/packages/server/src/utils/errors/structured-errors.ts @@ -0,0 +1,208 @@ +/** + * Structured error types for better error handling and recovery + */ + +export enum ErrorCategory { + NETWORK = "network", + FILESYSTEM = "filesystem", + DATABASE = "database", + AUTHENTICATION = "authentication", + VALIDATION = "validation", + RESOURCE = "resource", + DEPLOYMENT = "deployment", + MOUNT = "mount", + UNKNOWN = "unknown", +} + +export enum ErrorSeverity { + LOW = "low", + MEDIUM = "medium", + HIGH = "high", + CRITICAL = "critical", +} + +export interface RecoverySuggestion { + action: string; + description: string; + command?: string; +} + +export interface StructuredErrorDetails { + category: ErrorCategory; + severity: ErrorSeverity; + code?: string; + recoverable: boolean; + suggestions?: RecoverySuggestion[]; + context?: Record; + retryable?: boolean; + maxRetries?: number; +} + +export class StructuredError extends Error { + public readonly category: ErrorCategory; + public readonly severity: ErrorSeverity; + public readonly code?: string; + public readonly recoverable: boolean; + public readonly suggestions?: RecoverySuggestion[]; + public readonly context?: Record; + public readonly retryable: boolean; + public readonly maxRetries: number; + + constructor( + message: string, + details: StructuredErrorDetails, + ) { + super(message); + this.name = "StructuredError"; + this.category = details.category; + this.severity = details.severity; + this.code = details.code; + this.recoverable = details.recoverable; + 
this.suggestions = details.suggestions; + this.context = details.context; + this.retryable = details.retryable ?? false; + this.maxRetries = details.maxRetries ?? 3; + + if (Error.captureStackTrace) { + Error.captureStackTrace(this, StructuredError); + } + } + + /** + * Get user-friendly error message with recovery suggestions + */ + getUserMessage(): string { + let message = this.message; + + if (this.suggestions && this.suggestions.length > 0) { + message += "\n\nRecovery suggestions:"; + this.suggestions.forEach((suggestion, index) => { + message += `\n${index + 1}. ${suggestion.action}: ${suggestion.description}`; + }); + } + + return message; + } + + /** + * Check if error can be retried + */ + canRetry(attempts: number): boolean { + return this.retryable && attempts < this.maxRetries; + } +} + +/** + * Create a mount-related error with recovery suggestions + */ +export const createMountError = ( + message: string, + context?: Record, +): StructuredError => { + const suggestions: RecoverySuggestion[] = []; + + // Add context-specific suggestions + if (context?.mountType === "nfs") { + suggestions.push({ + action: "Verify NFS server connectivity", + description: "Check if the NFS server is accessible and the export is available", + command: `ping -c 3 ${context.nfsServer || "NFS_SERVER"}`, + }); + suggestions.push({ + action: "Check NFS service status", + description: "Ensure NFS services are running on the server", + command: "systemctl status nfs-server", + }); + } + + if (context?.mountType === "smb") { + suggestions.push({ + action: "Verify SMB server connectivity", + description: "Check if the SMB server is accessible", + command: `ping -c 3 ${context.smbServer || "SMB_SERVER"}`, + }); + suggestions.push({ + action: "Check credentials", + description: "Verify username, password, and domain are correct", + }); + } + + if (context?.nodeId) { + suggestions.push({ + action: "Check node connectivity", + description: "Verify the swarm node is accessible and in the swarm", + command: `docker node inspect ${context.nodeId}`, + }); + } + + return new StructuredError(message, { + category: ErrorCategory.MOUNT, + severity: ErrorSeverity.MEDIUM, + code: "MOUNT_ERROR", + recoverable: true, + suggestions, + context, + retryable: true, + maxRetries: 3, + }); +}; + +/** + * Create a network error with recovery suggestions + */ +export const createNetworkError = ( + message: string, + context?: Record, +): StructuredError => { + return new StructuredError(message, { + category: ErrorCategory.NETWORK, + severity: ErrorSeverity.HIGH, + code: "NETWORK_ERROR", + recoverable: true, + suggestions: [ + { + action: "Check network connectivity", + description: "Verify network connection and firewall rules", + }, + { + action: "Retry operation", + description: "Network issues are often temporary", + }, + ], + context, + retryable: true, + maxRetries: 5, + }); +}; + +/** + * Create a filesystem error with recovery suggestions + */ +export const createFilesystemError = ( + message: string, + context?: Record, +): StructuredError => { + const suggestions: RecoverySuggestion[] = [ + { + action: "Check disk space", + description: "Verify sufficient disk space is available", + command: "df -h", + }, + { + action: "Check permissions", + description: "Verify file/directory permissions are correct", + command: `ls -la ${context?.path || "PATH"}`, + }, + ]; + + return new StructuredError(message, { + category: ErrorCategory.FILESYSTEM, + severity: ErrorSeverity.MEDIUM, + code: "FILESYSTEM_ERROR", + recoverable: true, 
+ suggestions, + context, + retryable: false, + }); +}; + diff --git a/packages/server/src/utils/mounts/docker-volumes.ts b/packages/server/src/utils/mounts/docker-volumes.ts new file mode 100644 index 000000000..d9251d67e --- /dev/null +++ b/packages/server/src/utils/mounts/docker-volumes.ts @@ -0,0 +1,239 @@ +import type Dockerode from "dockerode"; +import { getRemoteDocker } from "../servers/remote-docker"; +import { createMountError } from "../errors/structured-errors"; +import { retryWithBackoff, isNetworkError } from "../errors/retry"; + +export interface CreateNFSVolumeParams { + volumeName: string; + nfsServer: string; + nfsPath: string; + mountOptions?: string; + serverId?: string | null; +} + +/** + * Creates a Docker volume using the local driver with NFS options + */ +export const createNFSVolume = async ( + params: CreateNFSVolumeParams, +): Promise => { + const { volumeName, nfsServer, nfsPath, mountOptions, serverId } = params; + + const docker = await getRemoteDocker(serverId); + + // Build mount options string + // Default options: rw (read-write) + const options = mountOptions || "rw"; + const mountOpts = `addr=${nfsServer},${options}`; + + try { + await retryWithBackoff( + async () => { + await docker.createVolume({ + Name: volumeName, + Driver: "local", + DriverOpts: { + type: "nfs", + o: mountOpts, + device: `:${nfsPath}`, + }, + }); + }, + { + maxRetries: 3, + retryCondition: isNetworkError, + }, + ); + } catch (error) { + throw createMountError( + `Failed to create Docker NFS volume ${volumeName}`, + { + mountType: "nfs", + nfsServer, + nfsPath, + volumeName, + originalError: error instanceof Error ? error.message : String(error), + }, + ); + } +}; + +/** + * Removes a Docker volume + */ +export const removeNFSVolume = async ( + volumeName: string, + serverId?: string | null, +): Promise => { + const docker = await getRemoteDocker(serverId); + + try { + const volume = docker.getVolume(volumeName); + await volume.remove({ force: true }); + } catch (error) { + // Volume might not exist, which is fine + if ( + error instanceof Error && + error.message.includes("No such volume") + ) { + return; + } + throw createMountError( + `Failed to remove Docker volume ${volumeName}`, + { + mountType: "nfs", + volumeName, + originalError: error instanceof Error ? error.message : String(error), + }, + ); + } +}; + +/** + * Verifies that a Docker volume exists and is accessible + */ +export const verifyNFSVolume = async ( + volumeName: string, + serverId?: string | null, +): Promise<{ exists: boolean; error?: string }> => { + const docker = await getRemoteDocker(serverId); + + try { + const volume = docker.getVolume(volumeName); + await volume.inspect(); + return { exists: true }; + } catch (error) { + return { + exists: false, + error: + error instanceof Error ? 
error.message : String(error), + }; + } +}; + +/** + * Syncs Docker volumes to selected Swarm nodes + * Docker volumes need to exist on nodes where tasks run + */ +export const syncDockerVolumesToNodes = async ( + volumeName: string, + nfsServer: string, + nfsPath: string, + mountOptions: string | undefined, + nodeIds: string[], + serverId?: string | null, +): Promise> => { + const results = new Map(); + + // Get Docker instance for the server + const docker = await getRemoteDocker(serverId); + + // For each node, we need to create the volume on that node + // In Docker Swarm, volumes are created on the manager node and + // Docker handles distribution, but for NFS volumes, we may need + // to ensure they exist on each node where tasks might run + + // Get swarm nodes to determine which nodes need the volume + const { getSwarmNodes } = await import("../../services/docker"); + const allNodes = await getSwarmNodes(serverId || undefined); + + if (!allNodes || allNodes.length === 0) { + throw new Error("No swarm nodes found"); + } + + for (const nodeId of nodeIds) { + try { + // Check if node exists + const node = allNodes.find((n) => n.ID === nodeId); + if (!node) { + results.set(nodeId, { + success: false, + error: `Node ${nodeId} not found in swarm`, + }); + continue; + } + + // For Docker Swarm, volumes created on the manager are available + // to all nodes. However, NFS volumes with the local driver need + // to be accessible from each node. We'll create the volume and + // Docker will handle the mount when a container uses it. + + // Try to create/inspect the volume + // If it already exists, that's fine + try { + const volume = docker.getVolume(volumeName); + await volume.inspect(); + // Volume exists, mark as success + results.set(nodeId, { success: true }); + } catch (error) { + // Volume doesn't exist, create it + // Note: In Swarm mode, volumes are typically created on the manager + // and Docker handles making them available to worker nodes + if ( + error instanceof Error && + error.message.includes("No such volume") + ) { + // Only create on manager node (or if we're on the manager) + if (node.Spec.Role === "manager" || !serverId) { + await createNFSVolume({ + volumeName, + nfsServer, + nfsPath, + mountOptions, + serverId, + }); + results.set(nodeId, { success: true }); + } else { + // For worker nodes, the volume will be created when needed + // by Docker Swarm. We'll mark it as success but note that + // the actual mount happens when a container uses it. + results.set(nodeId, { success: true }); + } + } else { + throw error; + } + } + } catch (error) { + const errorMessage = + error instanceof Error ? 
error.message : String(error); + results.set(nodeId, { success: false, error: errorMessage }); + } + } + + return results; +}; + +/** + * Gets volume information + */ +export const getVolumeInfo = async ( + volumeName: string, + serverId?: string | null, +): Promise<{ + name: string; + driver: string; + mountpoint?: string; + options?: Record; +} | null> => { + const docker = await getRemoteDocker(serverId); + + try { + const volume = docker.getVolume(volumeName); + const info = await volume.inspect(); + return { + name: info.Name, + driver: info.Driver, + mountpoint: info.Mountpoint, + options: info.Options, + }; + } catch (error) { + if ( + error instanceof Error && + error.message.includes("No such volume") + ) { + return null; + } + throw error; + } +}; + diff --git a/packages/server/src/utils/mounts/host-mount.ts b/packages/server/src/utils/mounts/host-mount.ts new file mode 100644 index 000000000..51ed4318e --- /dev/null +++ b/packages/server/src/utils/mounts/host-mount.ts @@ -0,0 +1,322 @@ +import { execAsync, execAsyncRemote } from "../process/execAsync"; +import { decryptMountCredentials } from "../encryption/mount-credentials"; +import { createMountError, createNetworkError } from "../errors/structured-errors"; +import { retryWithBackoff, isNetworkError, isMountError } from "../errors/retry"; +import type { mountCredentials } from "../../db/schema/mount-credentials"; + +export interface MountNFSParams { + nfsServer: string; + nfsPath: string; + mountPoint: string; + mountOptions?: string; + credentials?: typeof mountCredentials.$inferSelect; + serverId?: string | null; +} + +export interface MountSMBParams { + smbServer: string; + smbShare: string; + smbPath?: string; + mountPoint: string; + mountOptions?: string; + credentials?: typeof mountCredentials.$inferSelect; + serverId?: string | null; +} + +/** + * Creates a secure credentials file for mounting + * Returns the path to the credentials file + */ +const createCredentialsFile = async ( + credentials: typeof mountCredentials.$inferSelect, + mountPoint: string, + serverId?: string | null, +): Promise => { + const decrypted = await decryptMountCredentials({ + username: credentials.username, + password: credentials.password, + domain: credentials.domain || undefined, + }); + + const credsDir = `${mountPoint}/.credentials`; + const credsFile = `${credsDir}/.smbcredentials`; + + const createDirCommand = `mkdir -p ${credsDir}`; + const createFileCommand = `cat > ${credsFile} << 'EOF' +username=${decrypted.username} +password=${decrypted.password} +${decrypted.domain ? 
`domain=${decrypted.domain}` : ""} +EOF +chmod 600 ${credsFile}`; + + if (serverId) { + await execAsyncRemote(serverId, createDirCommand); + await execAsyncRemote(serverId, createFileCommand); + } else { + await execAsync(createDirCommand); + await execAsync(createFileCommand); + } + + return credsFile; +}; + +/** + * Mounts an NFS share on the host + */ +export const mountNFS = async (params: MountNFSParams): Promise => { + const { nfsServer, nfsPath, mountPoint, mountOptions, serverId } = params; + + // Create mount point if it doesn't exist + const createMountPointCommand = `mkdir -p ${mountPoint}`; + if (serverId) { + await execAsyncRemote(serverId, createMountPointCommand); + } else { + await execAsync(createMountPointCommand); + } + + // Build mount command + const mountSource = `${nfsServer}:${nfsPath}`; + const options = mountOptions || "vers=4.0,soft,timeo=30"; + const mountCommand = `mount -t nfs -o ${options} ${mountSource} ${mountPoint}`; + + try { + await retryWithBackoff( + async () => { + if (serverId) { + await execAsyncRemote(serverId, mountCommand); + } else { + await execAsync(mountCommand); + } + }, + { + maxRetries: 3, + retryCondition: (error) => isNetworkError(error) || isMountError(error), + }, + ); + } catch (error) { + throw createMountError( + `Failed to mount NFS share ${mountSource} to ${mountPoint}`, + { + mountType: "nfs", + nfsServer, + nfsPath, + mountPoint, + originalError: error instanceof Error ? error.message : String(error), + }, + ); + } +}; + +/** + * Mounts an SMB/CIFS share on the host + */ +export const mountSMB = async (params: MountSMBParams): Promise => { + const { + smbServer, + smbShare, + smbPath, + mountPoint, + mountOptions, + credentials, + serverId, + } = params; + + // Create mount point if it doesn't exist + const createMountPointCommand = `mkdir -p ${mountPoint}`; + if (serverId) { + await execAsyncRemote(serverId, createMountPointCommand); + } else { + await execAsync(createMountPointCommand); + } + + // Build mount source + const mountSource = smbPath + ? `//${smbServer}/${smbShare}/${smbPath}` + : `//${smbServer}/${smbShare}`; + + // Build mount options + let options = mountOptions || "vers=3.0"; + if (credentials) { + const credsFile = await createCredentialsFile( + credentials, + mountPoint, + serverId, + ); + options += `,credentials=${credsFile}`; + } + + const mountCommand = `mount -t cifs -o ${options} ${mountSource} ${mountPoint}`; + + try { + await retryWithBackoff( + async () => { + if (serverId) { + await execAsyncRemote(serverId, mountCommand); + } else { + await execAsync(mountCommand); + } + }, + { + maxRetries: 3, + retryCondition: (error) => isNetworkError(error) || isMountError(error), + }, + ); + } catch (error) { + throw createMountError( + `Failed to mount SMB share ${mountSource} to ${mountPoint}`, + { + mountType: "smb", + smbServer, + smbShare, + smbPath, + mountPoint, + originalError: error instanceof Error ? 
error.message : String(error), + }, + ); + } +}; + +/** + * Unmounts a network share (NFS or SMB) + */ +export const unmountNetworkShare = async ( + mountPoint: string, + serverId?: string | null, +): Promise => { + const unmountCommand = `umount ${mountPoint} || umount -l ${mountPoint}`; + + try { + if (serverId) { + await execAsyncRemote(serverId, unmountCommand); + } else { + await execAsync(unmountCommand); + } + + // Clean up credentials file if it exists + const cleanupCommand = `rm -rf ${mountPoint}/.credentials 2>/dev/null || true`; + if (serverId) { + await execAsyncRemote(serverId, cleanupCommand); + } else { + await execAsync(cleanupCommand); + } + } catch (error) { + // Try lazy unmount if regular unmount fails + const lazyUnmountCommand = `umount -l ${mountPoint} || true`; + if (serverId) { + await execAsyncRemote(serverId, lazyUnmountCommand); + } else { + await execAsync(lazyUnmountCommand); + } + } +}; + +/** + * Verifies that a mount point is accessible and mounted + */ +export const verifyMount = async ( + mountPoint: string, + serverId?: string | null, +): Promise<{ mounted: boolean; error?: string }> => { + // Check if mount point exists and is a directory + const checkMountPointCommand = `test -d ${mountPoint} && echo "exists" || echo "missing"`; + let mountPointExists: string; + + if (serverId) { + const result = await execAsyncRemote(serverId, checkMountPointCommand); + mountPointExists = result.stdout.trim(); + } else { + const result = await execAsync(checkMountPointCommand); + mountPointExists = result.stdout.trim(); + } + + if (mountPointExists !== "exists") { + return { + mounted: false, + error: `Mount point ${mountPoint} does not exist`, + }; + } + + // Check if it's actually mounted by checking /proc/mounts or mount command + const checkMountedCommand = `mountpoint -q ${mountPoint} && echo "mounted" || echo "not_mounted"`; + let isMounted: string; + + try { + if (serverId) { + const result = await execAsyncRemote(serverId, checkMountedCommand); + isMounted = result.stdout.trim(); + } else { + const result = await execAsync(checkMountedCommand); + isMounted = result.stdout.trim(); + } + + if (isMounted === "mounted") { + // Try to read from the mount to verify it's accessible + const testReadCommand = `test -r ${mountPoint} && echo "readable" || echo "not_readable"`; + let isReadable: string; + + if (serverId) { + const result = await execAsyncRemote(serverId, testReadCommand); + isReadable = result.stdout.trim(); + } else { + const result = await execAsync(testReadCommand); + isReadable = result.stdout.trim(); + } + + if (isReadable === "readable") { + return { mounted: true }; + } + return { + mounted: false, + error: `Mount point ${mountPoint} is mounted but not readable`, + }; + } + + return { + mounted: false, + error: `Mount point ${mountPoint} is not mounted`, + }; + } catch (error) { + return { + mounted: false, + error: `Failed to verify mount: ${ + error instanceof Error ? 
error.message : String(error) + }`, + }; + } +}; + +/** + * Gets the mount status from /proc/mounts + */ +export const getMountStatus = async ( + mountPoint: string, + serverId?: string | null, +): Promise<{ mounted: boolean; type?: string; source?: string }> => { + const checkCommand = `grep -E "^[^ ]+ ${mountPoint} " /proc/mounts | head -1 || echo ""`; + let output: string; + + if (serverId) { + const result = await execAsyncRemote(serverId, checkCommand); + output = result.stdout.trim(); + } else { + const result = await execAsync(checkCommand); + output = result.stdout.trim(); + } + + if (!output) { + return { mounted: false }; + } + + // Parse /proc/mounts format: device mountpoint fstype options dump pass + const parts = output.split(/\s+/); + if (parts.length >= 3) { + return { + mounted: true, + source: parts[0], + type: parts[2], + }; + } + + return { mounted: true }; +}; + diff --git a/packages/server/src/utils/mounts/node-mount-manager.ts b/packages/server/src/utils/mounts/node-mount-manager.ts new file mode 100644 index 000000000..bead454c6 --- /dev/null +++ b/packages/server/src/utils/mounts/node-mount-manager.ts @@ -0,0 +1,160 @@ +import { db } from "../../db"; +import { mountCredentials } from "../../db/schema/mount-credentials"; +import { mounts } from "../../db/schema/mount"; +import { eq } from "drizzle-orm"; +import { execAsync, execAsyncRemote } from "../process/execAsync"; +import { + mountNFS, + mountSMB, + unmountNetworkShare, + getMountStatus, +} from "./host-mount"; + +/** + * Mounts NFS/SMB on a specific swarm node + */ +export const mountOnNode = async ( + mount: typeof mounts.$inferSelect, + nodeId: string, + serverId?: string | null, +): Promise<{ success: boolean; error?: string }> => { + try { + const mountPoint = + mount.mountPathOnHost || + `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + // Get per-node config if available + const nodeConfig = mount.nodeSpecificConfig?.[nodeId]; + const finalMountPoint = nodeConfig?.mountPath || mountPoint; + const finalMountOptions = nodeConfig?.mountOptions || mount.mountOptions; + + // Get credentials if needed + let credentials: typeof mountCredentials.$inferSelect | undefined; + if (mount.credentialsId) { + credentials = await db.query.mountCredentials.findFirst({ + where: eq(mountCredentials.credentialsId, mount.credentialsId), + }); + } + + if (mount.type === "nfs") { + if (!mount.nfsServer || !mount.nfsPath) { + throw new Error("NFS mount missing server or path"); + } + + await mountNFS({ + nfsServer: mount.nfsServer, + nfsPath: mount.nfsPath, + mountPoint: finalMountPoint, + mountOptions: finalMountOptions || undefined, + serverId: serverId || null, + }); + } else if (mount.type === "smb") { + if (!mount.smbServer || !mount.smbShare) { + throw new Error("SMB mount missing server or share"); + } + + await mountSMB({ + smbServer: mount.smbServer, + smbShare: mount.smbShare, + smbPath: mount.smbPath || undefined, + mountPoint: finalMountPoint, + mountOptions: finalMountOptions || undefined, + credentials: credentials, + serverId: serverId || null, + }); + } else { + throw new Error(`Unsupported mount type: ${mount.type}`); + } + + // Verify mount + const verifyResult = await getMountStatus(finalMountPoint, serverId); + if (!verifyResult.mounted) { + throw new Error("Mount created but verification failed"); + } + + return { success: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : String(error), + }; + } +}; + +/** + * Unmounts from a specific node + * Checks if mount is in use by any containers before unmounting + */ +export const unmountFromNode = async ( + mount: typeof mounts.$inferSelect, + nodeId: string, + serverId?: string | null, +): Promise<{ success: boolean; error?: string }> => { + try { + const mountPoint = + mount.mountPathOnHost || + `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + // Get per-node config if available + const nodeConfig = mount.nodeSpecificConfig?.[nodeId]; + const finalMountPoint = nodeConfig?.mountPath || mountPoint; + + // Check if mount is in use by any containers + // This is a simple check - in production you might want more sophisticated detection + const checkInUseCommand = `lsof ${finalMountPoint} 2>/dev/null | grep -q . && echo "in_use" || echo "not_in_use"`; + let inUse: string; + + if (serverId) { + const result = await execAsyncRemote(serverId, checkInUseCommand); + inUse = result.stdout.trim(); + } else { + const result = await execAsync(checkInUseCommand); + inUse = result.stdout.trim(); + } + + if (inUse === "in_use") { + return { + success: false, + error: "Mount is currently in use by containers", + }; + } + + // Safe to unmount + await unmountNetworkShare(finalMountPoint, serverId); + + return { success: true }; + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : String(error), + }; + } +}; + +/** + * Gets mount status on a node + */ +export const getNodeMountStatus = async ( + mount: typeof mounts.$inferSelect, + nodeId: string, + serverId?: string | null, +): Promise<{ mounted: boolean; type?: string; source?: string; error?: string }> => { + try { + const mountPoint = + mount.mountPathOnHost || + `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + // Get per-node config if available + const nodeConfig = mount.nodeSpecificConfig?.[nodeId]; + const finalMountPoint = nodeConfig?.mountPath || mountPoint; + + const status = await getMountStatus(finalMountPoint, serverId); + return status; + } catch (error) { + return { + mounted: false, + error: error instanceof Error ? 
error.message : String(error), + }; + } +}; + diff --git a/packages/server/src/utils/mounts/swarm-replication.ts b/packages/server/src/utils/mounts/swarm-replication.ts new file mode 100644 index 000000000..4ed2ae102 --- /dev/null +++ b/packages/server/src/utils/mounts/swarm-replication.ts @@ -0,0 +1,498 @@ +import { and, eq } from "drizzle-orm"; +import { db } from "../../db"; +import { mounts } from "../../db/schema/mount"; +import { mountCredentials } from "../../db/schema/mount-credentials"; +import { mountNodeStatus } from "../../db/schema/mount-node-status"; +import { getNodeInfo, getSwarmNodes } from "../../services/docker"; +import { decryptMountCredentials } from "../encryption/mount-credentials"; +import { execAsync, execAsyncRemote } from "../process/execAsync"; +import { syncDockerVolumesToNodes } from "./docker-volumes"; +import { mountNFS, mountSMB, verifyMount } from "./host-mount"; + +export interface SwarmNodeInfo { + nodeId: string; + hostname: string; + ip: string; + role: "manager" | "worker"; + status: string; + availability: "active" | "pause" | "drain"; + labels?: Record; +} + +/** + * Gets list of swarm nodes that need the mount based on targetNodes configuration + * Validates that all target nodes are accessible and in the swarm + */ +export const getSwarmNodesForMount = async ( + mount: typeof mounts.$inferSelect, + serverId?: string | null, +): Promise => { + if (!mount.replicateToSwarm) { + return []; + } + + if (!mount.targetNodes || mount.targetNodes.length === 0) { + throw new Error( + "targetNodes must be specified when replicateToSwarm is true", + ); + } + + // Get all swarm nodes + const allNodes = await getSwarmNodes(serverId || undefined); + if (!allNodes || allNodes.length === 0) { + throw new Error("No swarm nodes found"); + } + + // Filter to only target nodes and get detailed info + const targetNodeInfos: SwarmNodeInfo[] = []; + + for (const targetNodeId of mount.targetNodes) { + const node = allNodes.find((n) => n.ID === targetNodeId); + if (!node) { + throw new Error( + `Node ${targetNodeId} not found in swarm. 
It may have been removed.`, + ); + } + + // Filter out drained/removed nodes + if (node.Spec.Availability === "drain") { + console.warn( + `Node ${targetNodeId} is drained, skipping mount configuration`, + ); + continue; + } + + // Get detailed node info + const nodeInfo = await getNodeInfo(targetNodeId, serverId || undefined); + if (!nodeInfo) { + throw new Error(`Failed to get info for node ${targetNodeId}`); + } + + targetNodeInfos.push({ + nodeId: targetNodeId, + hostname: node.Description?.Hostname || targetNodeId, + ip: node.Status?.Addr || "", + role: node.Spec.Role, + status: node.Status?.State || "unknown", + availability: node.Spec.Availability, + labels: node.Spec.Labels || {}, + }); + } + + return targetNodeInfos; +}; + +/** + * Securely distributes encrypted credentials to specified nodes via SSH + */ +export const distributeCredentialsToNodes = async ( + mount: typeof mounts.$inferSelect, + nodeIds: string[], + serverId?: string | null, +): Promise => { + if (!mount.credentialsId) { + // No credentials needed (e.g., anonymous NFS) + return; + } + + // Get credentials from database + const credentials = await db.query.mountCredentials.findFirst({ + where: eq(mountCredentials.credentialsId, mount.credentialsId), + }); + + if (!credentials) { + throw new Error(`Credentials not found for mount ${mount.mountId}`); + } + + // Decrypt credentials (only in memory) + const decrypted = await decryptMountCredentials({ + username: credentials.username, + password: credentials.password, + domain: credentials.domain || undefined, + }); + + // Determine mount point path + const mountPoint = + mount.mountPathOnHost || `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + // Create credentials directory and file on each node + for (const nodeId of nodeIds) { + const credsDir = `${mountPoint}/.credentials`; + const credsFile = `${credsDir}/.smbcredentials`; + + // Create directory + const createDirCommand = `mkdir -p ${credsDir}`; + + // Create credentials file with proper permissions + const createFileCommand = `cat > ${credsFile} << 'EOF' +username=${decrypted.username} +password=${decrypted.password} +${decrypted.domain ? `domain=${decrypted.domain}` : ""} +EOF +chmod 600 ${credsFile}`; + + try { + // For now, we use the same serverId for all nodes + // In a multi-server setup, we'd need to determine the server for each node + if (serverId) { + await execAsyncRemote(serverId, createDirCommand); + await execAsyncRemote(serverId, createFileCommand); + } else { + await execAsync(createDirCommand); + await execAsync(createFileCommand); + } + } catch (error) { + console.error( + `Failed to distribute credentials to node ${nodeId}:`, + error, + ); + throw new Error( + `Failed to distribute credentials to node ${nodeId}: ${ + error instanceof Error ? 
error.message : String(error) + }`, + ); + } + } +}; + +/** + * Ensures mount exists on all specified target swarm nodes + */ +export const syncMountToAllNodes = async ( + mount: typeof mounts.$inferSelect, + nodeIds: string[], + serverId?: string | null, +): Promise> => { + const mountMethod = mount.mountMethod || "host-mount"; + + // Route to appropriate sync function based on mount method + if (mountMethod === "docker-volume" && mount.type === "nfs") { + // Use Docker volume sync + if (!mount.dockerVolumeName || !mount.nfsServer || !mount.nfsPath) { + throw new Error( + "Docker volume mount missing required fields (dockerVolumeName, nfsServer, nfsPath)", + ); + } + + return await syncDockerVolumesToNodes( + mount.dockerVolumeName, + mount.nfsServer, + mount.nfsPath, + mount.mountOptions || undefined, + nodeIds, + serverId, + ); + } + + // Host-level mount sync (existing logic) + const results = new Map(); + + // Get credentials if needed + let credentials: typeof mountCredentials.$inferSelect | undefined; + if (mount.credentialsId) { + credentials = await db.query.mountCredentials.findFirst({ + where: eq(mountCredentials.credentialsId, mount.credentialsId), + }); + } + + // Determine mount point path + const mountPoint = + mount.mountPathOnHost || `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + for (const nodeId of nodeIds) { + try { + // Get per-node config if available + const nodeConfig = mount.nodeSpecificConfig?.[nodeId]; + const finalMountPoint = nodeConfig?.mountPath || mountPoint; + const finalMountOptions = nodeConfig?.mountOptions || mount.mountOptions; + + // Check if mount already exists + const mountStatus = await verifyMount(finalMountPoint, serverId); + if (mountStatus.mounted) { + // Mount already exists, update status + await updateMountNodeStatus(mount.mountId, nodeId, { + mountStatus: "mounted", + serverId: serverId || null, + }); + results.set(nodeId, { success: true }); + continue; + } + + // Mount doesn't exist, create it + if (mount.type === "nfs") { + if (!mount.nfsServer || !mount.nfsPath) { + throw new Error("NFS mount missing server or path"); + } + + await mountNFS({ + nfsServer: mount.nfsServer, + nfsPath: mount.nfsPath, + mountPoint: finalMountPoint, + mountOptions: finalMountOptions || undefined, + serverId: serverId || null, + }); + } else if (mount.type === "smb") { + if (!mount.smbServer || !mount.smbShare) { + throw new Error("SMB mount missing server or share"); + } + + await mountSMB({ + smbServer: mount.smbServer, + smbShare: mount.smbShare, + smbPath: mount.smbPath || undefined, + mountPoint: finalMountPoint, + mountOptions: finalMountOptions || undefined, + credentials: credentials, + serverId: serverId || null, + }); + } else { + throw new Error(`Unsupported mount type: ${mount.type}`); + } + + // Verify mount was successful + const verifyResult = await verifyMount(finalMountPoint, serverId); + if (!verifyResult.mounted) { + throw new Error(verifyResult.error || "Mount verification failed"); + } + + // Update status + await updateMountNodeStatus(mount.mountId, nodeId, { + mountStatus: "mounted", + serverId: serverId || null, + }); + + results.set(nodeId, { success: true }); + } catch (error) { + const errorMessage = + error instanceof Error ? 
error.message : String(error); + results.set(nodeId, { success: false, error: errorMessage }); + + // Update status with error + await updateMountNodeStatus(mount.mountId, nodeId, { + mountStatus: "failed", + errorMessage, + serverId: serverId || null, + }); + } + } + + return results; +}; + +/** + * Verifies mounts on specified nodes + */ +export const verifyMountsOnNodes = async ( + mountId: string, + nodeIds: string[], + serverId?: string | null, +): Promise> => { + const mount = await db.query.mounts.findFirst({ + where: eq(mounts.mountId, mountId), + }); + + if (!mount) { + throw new Error(`Mount ${mountId} not found`); + } + + const results = new Map(); + + for (const nodeId of nodeIds) { + try { + const mountPoint = + mount.mountPathOnHost || `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + // Get per-node config if available + const nodeConfig = mount.nodeSpecificConfig?.[nodeId]; + const finalMountPoint = nodeConfig?.mountPath || mountPoint; + + const verifyResult = await verifyMount(finalMountPoint, serverId); + + // Update status + await updateMountNodeStatus(mountId, nodeId, { + mountStatus: verifyResult.mounted ? "mounted" : "failed", + errorMessage: verifyResult.error, + serverId: serverId || null, + }); + + results.set(nodeId, { + mounted: verifyResult.mounted, + error: verifyResult.error, + }); + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + results.set(nodeId, { mounted: false, error: errorMessage }); + + await updateMountNodeStatus(mountId, nodeId, { + mountStatus: "failed", + errorMessage, + serverId: serverId || null, + }); + } + } + + return results; +}; + +/** + * Removes mount and credentials from nodes when mount is deleted + */ +export const cleanupMountFromNodes = async ( + mount: typeof mounts.$inferSelect, + nodeIds: string[], + serverId?: string | null, +): Promise => { + const mountPoint = + mount.mountPathOnHost || `/mnt/dokploy-${mount.type}-${mount.mountId}`; + + for (const nodeId of nodeIds) { + try { + // Get per-node config if available + const nodeConfig = mount.nodeSpecificConfig?.[nodeId]; + const finalMountPoint = nodeConfig?.mountPath || mountPoint; + + // Unmount + const unmountCommand = `umount ${finalMountPoint} || umount -l ${finalMountPoint} || true`; + + if (serverId) { + await execAsyncRemote(serverId, unmountCommand); + } else { + await execAsync(unmountCommand); + } + + // Clean up credentials + const cleanupCommand = `rm -rf ${finalMountPoint}/.credentials 2>/dev/null || true`; + if (serverId) { + await execAsyncRemote(serverId, cleanupCommand); + } else { + await execAsync(cleanupCommand); + } + + // Update status + await updateMountNodeStatus(mount.mountId, nodeId, { + mountStatus: "unmounted", + serverId: serverId || null, + }); + } catch (error) { + console.error(`Failed to cleanup mount from node ${nodeId}:`, error); + // Continue with other nodes even if one fails + } + } +}; + +/** + * Tests if NFS/SMB server is accessible from a specific node + */ +export const testNodeConnectivity = async ( + _nodeId: string, + nfsServer?: string, + smbServer?: string, + serverId?: string | null, +): Promise<{ accessible: boolean; latency?: number; error?: string }> => { + const startTime = Date.now(); + + try { + if (nfsServer) { + // Test NFS connectivity (ping or port check) + const testCommand = `timeout 5 bash -c ' => { + const existing = await db.query.mountNodeStatus.findFirst({ + where: and( + eq(mountNodeStatus.mountId, mountId), + eq(mountNodeStatus.nodeId, nodeId), + ), + }); + + 
if (existing) { + await db + .update(mountNodeStatus) + .set({ + ...updates, + lastVerified: new Date(), + updatedAt: new Date().toISOString(), + }) + .where( + and( + eq(mountNodeStatus.mountId, mountId), + eq(mountNodeStatus.nodeId, nodeId), + ), + ); + } else { + // Get node info for hostname + const nodeInfo = await getNodeInfo(nodeId, updates.serverId || undefined); + const hostname = nodeInfo?.Description?.Hostname || nodeId.substring(0, 12); + + await db.insert(mountNodeStatus).values({ + mountId, + nodeId, + nodeHostname: hostname, + mountStatus: updates.mountStatus || "pending", + errorMessage: updates.errorMessage || null, + serverId: updates.serverId || null, + lastVerified: new Date(), + }); + } +}; diff --git a/packages/server/src/utils/security/validation.ts b/packages/server/src/utils/security/validation.ts new file mode 100644 index 000000000..808a5322e --- /dev/null +++ b/packages/server/src/utils/security/validation.ts @@ -0,0 +1,258 @@ +/** + * Security validation utilities for input sanitization and path validation + */ + +/** + * Validates and sanitizes file paths to prevent directory traversal attacks + */ +export const validatePath = (path: string, allowAbsolute = false): string => { + if (!path || typeof path !== "string") { + throw new Error("Path must be a non-empty string"); + } + + // Remove null bytes + let sanitized = path.replace(/\0/g, ""); + + // Check for directory traversal attempts + if ( + sanitized.includes("..") || + sanitized.includes("//") || + sanitized.includes("\\\\") + ) { + throw new Error("Path contains invalid characters (directory traversal attempt)"); + } + + // If absolute paths are not allowed, ensure it's relative + if (!allowAbsolute && sanitized.startsWith("/")) { + throw new Error("Absolute paths are not allowed"); + } + + // Trim whitespace + sanitized = sanitized.trim(); + + // Ensure path doesn't end with a slash (except root) + if (sanitized.length > 1 && sanitized.endsWith("/")) { + sanitized = sanitized.slice(0, -1); + } + + return sanitized; +}; + +/** + * Validates mount path (must be absolute and not contain dangerous patterns) + */ +export const validateMountPath = (path: string): string => { + if (!path || typeof path !== "string") { + throw new Error("Mount path must be a non-empty string"); + } + + // Must be absolute + if (!path.startsWith("/")) { + throw new Error("Mount path must be absolute (start with /)"); + } + + // Validate using general path validation + const sanitized = validatePath(path, true); + + // Additional checks for mount paths + if (sanitized.length === 0 || sanitized === "/") { + throw new Error("Mount path cannot be root"); + } + + // Check for dangerous mount points + const dangerousPaths = [ + "/etc", + "/bin", + "/sbin", + "/usr/bin", + "/usr/sbin", + "/lib", + "/lib64", + "/sys", + "/proc", + "/dev", + "/boot", + "/root", + ]; + + for (const dangerous of dangerousPaths) { + if (sanitized === dangerous || sanitized.startsWith(`${dangerous}/`)) { + throw new Error(`Mount path cannot be ${dangerous} or its subdirectories`); + } + } + + return sanitized; +}; + +/** + * Validates NFS server address (IP or hostname) + */ +export const validateNFSServer = (server: string): string => { + if (!server || typeof server !== "string") { + throw new Error("NFS server must be a non-empty string"); + } + + const sanitized = server.trim(); + + // Basic validation for IP or hostname + const ipRegex = + /^(\d{1,3}\.){3}\d{1,3}$/; + const hostnameRegex = + 
/^[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$/; + + if (!ipRegex.test(sanitized) && !hostnameRegex.test(sanitized)) { + throw new Error("NFS server must be a valid IP address or hostname"); + } + + // Validate IP address ranges if it's an IP + if (ipRegex.test(sanitized)) { + const parts = sanitized.split(".").map(Number); + if (parts.some((part) => part < 0 || part > 255)) { + throw new Error("NFS server IP address contains invalid octets"); + } + } + + return sanitized; +}; + +/** + * Validates SMB server address (IP or hostname) + */ +export const validateSMBServer = (server: string): string => { + // Same validation as NFS server + return validateNFSServer(server); +}; + +/** + * Validates NFS export path + */ +export const validateNFSPath = (path: string): string => { + if (!path || typeof path !== "string") { + throw new Error("NFS path must be a non-empty string"); + } + + // Must be absolute + if (!path.startsWith("/")) { + throw new Error("NFS path must be absolute (start with /)"); + } + + // Validate using general path validation + return validatePath(path, true); +}; + +/** + * Validates SMB share name + */ +export const validateSMBShare = (share: string): string => { + if (!share || typeof share !== "string") { + throw new Error("SMB share name must be a non-empty string"); + } + + const sanitized = share.trim(); + + // SMB share names have restrictions + if (sanitized.length === 0 || sanitized.length > 80) { + throw new Error("SMB share name must be between 1 and 80 characters"); + } + + // Check for invalid characters + if (!/^[a-zA-Z0-9_-]+$/.test(sanitized)) { + throw new Error( + "SMB share name can only contain letters, numbers, underscores, and hyphens", + ); + } + + return sanitized; +}; + +/** + * Validates SMB subdirectory path + */ +export const validateSMBPath = (path: string): string => { + if (!path) { + return ""; + } + + if (typeof path !== "string") { + throw new Error("SMB path must be a string"); + } + + // Must start with / + if (!path.startsWith("/")) { + throw new Error("SMB path must start with /"); + } + + // Validate using general path validation + return validatePath(path, true); +}; + +/** + * Sanitizes username to prevent injection attacks + */ +export const sanitizeUsername = (username: string): string => { + if (!username || typeof username !== "string") { + throw new Error("Username must be a non-empty string"); + } + + // Remove null bytes and control characters + let sanitized = username.replace(/[\0\x00-\x1F\x7F]/g, ""); + + // Trim whitespace + sanitized = sanitized.trim(); + + // Check length + if (sanitized.length === 0 || sanitized.length > 255) { + throw new Error("Username must be between 1 and 255 characters"); + } + + return sanitized; +}; + +/** + * Validates mount options string + */ +export const validateMountOptions = (options: string | undefined): string | undefined => { + if (!options) { + return undefined; + } + + if (typeof options !== "string") { + throw new Error("Mount options must be a string"); + } + + const sanitized = options.trim(); + + // Check for dangerous options that could be used for injection + const dangerousOptions = ["exec", "suid", "dev", "nosuid", "nodev", "noexec"]; + + // Allow these but log a warning + for (const dangerous of dangerousOptions) { + if (sanitized.includes(dangerous)) { + console.warn( + `Mount option "${dangerous}" detected. 
Ensure this is intentional.`,
+			);
+		}
+	}
+
+	// Remove null bytes
+	return sanitized.replace(/\0/g, "");
+};
+
+/**
+ * Validates Docker Swarm node ID
+ */
+export const validateNodeId = (nodeId: string): string => {
+	if (!nodeId || typeof nodeId !== "string") {
+		throw new Error("Node ID must be a non-empty string");
+	}
+
+	const sanitized = nodeId.trim();
+
+	// Swarm node IDs (as listed by `docker node ls` and returned by the
+	// swarm API) are 25-character lowercase alphanumeric strings, not
+	// 64-character hex digests
+	if (!/^[a-z0-9]{25}$/i.test(sanitized)) {
+		throw new Error("Invalid Docker Swarm node ID format");
+	}
+
+	return sanitized;
+};
+
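
For reference, a minimal round-trip sketch of the credential helpers from mount-credentials.ts, assuming encryptMountCredentials mirrors the decrypt signature shown above; the import path and sample values are illustrative only.

import {
	decryptMountCredentials,
	encryptMountCredentials,
} from "packages/server/src/utils/encryption/mount-credentials";

const credentialsRoundTrip = async () => {
	// Encrypt before persisting; domain is optional and omitted when absent.
	const stored = await encryptMountCredentials({
		username: "backup-user",
		password: "s3cret",
		domain: "WORKGROUP",
	});

	// Decrypt only in memory, right before a mount command needs the values.
	const plain = await decryptMountCredentials(stored);
	return plain.username === "backup-user";
};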
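
A sketch of how retryWithBackoff and the isNetworkError condition from retry.ts might wrap a flaky network call; the URL and option values are examples.

import {
	isNetworkError,
	retryWithBackoff,
} from "packages/server/src/utils/errors/retry";

const fetchJsonWithRetry = async (url: string) =>
	retryWithBackoff(
		async () => {
			const res = await fetch(url);
			if (!res.ok) throw new Error(`network error: HTTP ${res.status}`);
			return res.json();
		},
		{
			maxRetries: 5,
			initialDelay: 500, // ms; doubled each attempt, capped at maxDelay
			retryCondition: isNetworkError, // skip retries for non-network failures
			onRetry: (attempt, error) =>
				console.warn(`attempt ${attempt} failed: ${error.message}`),
		},
	);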
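
A sketch of consuming the structured errors from structured-errors.ts; the server address and export path are placeholders.

import {
	StructuredError,
	createMountError,
} from "packages/server/src/utils/errors/structured-errors";

try {
	throw createMountError("Failed to mount NFS share", {
		mountType: "nfs",
		nfsServer: "10.0.0.5",
		nfsPath: "/exports/data",
	});
} catch (error) {
	if (error instanceof StructuredError) {
		// Message plus the numbered recovery suggestions
		// (ping the server, check the NFS service status, ...).
		console.error(error.getUserMessage());
		console.log(error.canRetry(1)); // true: mount errors are retryable, maxRetries 3
	}
}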
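
A sketch of the docker-volume method from docker-volumes.ts, which creates a local-driver volume backed by NFS instead of mounting on the host; names and addresses are examples.

import {
	createNFSVolume,
	verifyNFSVolume,
} from "packages/server/src/utils/mounts/docker-volumes";

const dockerVolumeExample = async () => {
	await createNFSVolume({
		volumeName: "dokploy-nfs-media",
		nfsServer: "10.0.0.5",
		nfsPath: "/exports/media",
		mountOptions: "rw,nfsvers=4.1", // driver opt becomes "addr=10.0.0.5,rw,nfsvers=4.1"
	});

	const { exists, error } = await verifyNFSVolume("dokploy-nfs-media");
	if (!exists) {
		console.error(error);
	}
};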
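
A sketch of the host-mount path for an NFS share on the local server (no serverId), using the helpers from host-mount.ts; addresses, paths, and options are examples.

import {
	mountNFS,
	unmountNetworkShare,
	verifyMount,
} from "packages/server/src/utils/mounts/host-mount";

const hostMountExample = async () => {
	const mountPoint = "/mnt/dokploy-nfs-example";

	await mountNFS({
		nfsServer: "10.0.0.5",
		nfsPath: "/exports/data",
		mountPoint,
		mountOptions: "vers=4.1,soft,timeo=30", // default is vers=4.0,soft,timeo=30
	});

	const status = await verifyMount(mountPoint);
	if (!status.mounted) {
		console.error(status.error);
	}

	await unmountNetworkShare(mountPoint);
};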
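
A sketch of chaining the validation helpers from validation.ts before persisting an NFS mount; each helper throws on bad input and returns the sanitized value.

import {
	validateMountOptions,
	validateMountPath,
	validateNFSPath,
	validateNFSServer,
} from "packages/server/src/utils/security/validation";

const sanitizeNFSInput = (input: {
	nfsServer: string;
	nfsPath: string;
	mountPath: string;
	mountOptions?: string;
}) => ({
	nfsServer: validateNFSServer(input.nfsServer), // IP or hostname
	nfsPath: validateNFSPath(input.nfsPath), // absolute export path
	mountPath: validateMountPath(input.mountPath), // absolute, excludes /etc, /proc, ...
	mountOptions: validateMountOptions(input.mountOptions),
});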