From 1e9b4f4f518e55eb315e218574f67620494e77c3 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Tue, 30 Dec 2025 11:45:20 +0000 Subject: [PATCH 1/7] fix: remove lv fail error --- snapshots/devbox/devbox.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 38e96bd9e778..0b758786bbe1 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -1066,23 +1066,45 @@ func (o *Snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir strin log.G(ctx).Debug("Creating LVM volume:", lvName, "with capacity:", capacity, "in volume group:", o.lvmVgName) err = lvm.CreateVolume(ctx, vol) if err != nil { + // If create fails, we should remove the LVM logical volume + if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { + log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after create failure") + } return td, lvName, fmt.Errorf("failed to create LVM logical volume %s: %w", lvName, err) } if err = o.mkfs(lvName); err != nil { + // If mkfs fails, we should remove the LVM logical volume + if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { + log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after mkfs failure") + } return td, lvName, fmt.Errorf("failed to create filesystem on LVM logical volume %s: %w", lvName, err) } mounted = true if err = o.mountLvm(ctx, lvName, td); err != nil { + // If mount fails, we should remove the LVM logical volume + if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { + log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after mount failure") + } return td, lvName, fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) } if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { + // If fs dir creation fails, we should unmount the LVM logical volume and force destroy it + o.unmountLvm(ctx, td) + if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { + log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after fs dir creation failure") + } return td, lvName, fmt.Errorf("failed to create fs directory: %w", err) } if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil { + // If work dir creation fails, we should unmount the LVM logical volume and force destroy it + o.unmountLvm(ctx, td) + if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { + log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after work dir creation failure") + } return td, lvName, fmt.Errorf("failed to create work directory: %w", err) } From 640f59a94c1fdc5c912aee23c53c96bf517bf050 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Wed, 31 Dec 2025 07:06:23 +0000 Subject: [PATCH 2/7] feat: lvm lock --- snapshots/devbox/devbox.go | 1 - snapshots/devbox/lvm/lvm.go | 26 +- snapshots/devbox/lvm/lvm_test.go | 433 ++++++++++++++++++++++++++++++- 3 files changed, 455 insertions(+), 5 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 0b758786bbe1..5b4698fad181 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -594,7 +594,6 @@ func (o *Snapshotter) getCleanupLvNames(ctx context.Context) ([]string, error) { } func (o *Snapshotter) resizeLVMVolume(ctx context.Context, lvName, useLimit string) error { - capacity, err := 
parseUseLimit(useLimit) if err != nil { return fmt.Errorf("failed to parse use limit %s: %w", useLimit, err) diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index 1179801e87b2..ab379b4aa07d 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -26,6 +26,7 @@ import ( "path/filepath" "strconv" "strings" + "sync" "syscall" "github.com/pkg/errors" @@ -35,6 +36,9 @@ import ( apis "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1" ) +// lvmLock lvm global lock +var lvmLock sync.Mutex + // lvm related constants const ( DevPath = "/dev/" @@ -322,6 +326,9 @@ func RunCommandSplit(ctx context.Context, command string, args ...string) ([]byt // CreateVolume creates the lvm volume func CreateVolume(ctx context.Context, vol *apis.LVMVolume) error { + lvmLock.Lock() + defer lvmLock.Unlock() + volume := vol.Spec.VolGroup + "/" + vol.Name volExists, err := CheckVolumeExists(ctx, vol) @@ -330,7 +337,7 @@ func CreateVolume(ctx context.Context, vol *apis.LVMVolume) error { } if volExists { klog.Infof("CreateVolume: volume (%s) already exists, skipping its creation", volume) - err := ResizeLVMVolume(ctx, vol, false) + err := resizeLVMVolumeInternal(ctx, vol, false) if err != nil { return err } @@ -354,6 +361,9 @@ func CreateVolume(ctx context.Context, vol *apis.LVMVolume) error { // DestroyVolume deletes the lvm volume func DestroyVolume(ctx context.Context, vol *apis.LVMVolume) error { + lvmLock.Lock() + defer lvmLock.Unlock() + if vol.Spec.VolGroup == "" { klog.Infof("DestroyVolume: volGroup not set for lvm volume %v, skipping its deletion", vol.Name) return nil @@ -392,6 +402,9 @@ func DestroyVolume(ctx context.Context, vol *apis.LVMVolume) error { // ForceDestroyVolume force destroys the lvm volume func ForceDestroyVolume(ctx context.Context, vol *apis.LVMVolume) error { + lvmLock.Lock() + defer lvmLock.Unlock() + if vol.Spec.VolGroup == "" { klog.Infof("ForceDestroyVolume: volGroup not set for lvm volume %v, skipping its deletion", vol.Name) return nil @@ -508,8 +521,16 @@ func buildVolumeResizeArgs(vol *apis.LVMVolume, resizefs bool) []string { // same size will not return any errors // 2. Triggering `lvextend -L ` more than one time will // cause errors +// +// ResizeLVMVolume external interface, with lock func ResizeLVMVolume(ctx context.Context, vol *apis.LVMVolume, resizefs bool) error { + lvmLock.Lock() + defer lvmLock.Unlock() + return resizeLVMVolumeInternal(ctx, vol, resizefs) +} +// resizeLVMVolumeInternal internal function, without lock (for CreateVolume etc.) +func resizeLVMVolumeInternal(ctx context.Context, vol *apis.LVMVolume, resizefs bool) error { // In case if resizefs is not enabled then check current size // before exapnding LVM volume(If volume is already expanded then // it might be error prone). 
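The ResizeLVMVolume / resizeLVMVolumeInternal split above is the usual way to reuse logic under a non-reentrant sync.Mutex: the exported function takes the lock, the unexported variant assumes the caller already holds it, so CreateVolume can fall back to the resize path without deadlocking on its own lock. A minimal, self-contained sketch of that pattern follows; run, ResizeVolume, resizeVolumeLocked and CreateVolume are illustrative stand-ins, not the snapshotter's API:

package main

import (
    "fmt"
    "sync"
)

// lvmLock serializes all LVM command invocations, mirroring the global lock
// introduced in this patch.
var lvmLock sync.Mutex

// run stands in for shelling out to an LVM tool (lvcreate, lvextend, ...).
func run(cmd string) error {
    fmt.Println("exec:", cmd)
    return nil
}

// ResizeVolume is the exported entry point: it takes the lock itself.
func ResizeVolume(name, size string) error {
    lvmLock.Lock()
    defer lvmLock.Unlock()
    return resizeVolumeLocked(name, size)
}

// resizeVolumeLocked assumes lvmLock is already held by the caller.
// sync.Mutex is not reentrant, so code that already holds the lock must call
// this variant instead of ResizeVolume to avoid blocking on its own lock.
func resizeVolumeLocked(name, size string) error {
    return run("lvextend -L " + size + " " + name)
}

// CreateVolume takes the lock once and reuses the resize logic for the
// "volume already exists" case, as CreateVolume does in lvm.go.
func CreateVolume(name, size string, exists bool) error {
    lvmLock.Lock()
    defer lvmLock.Unlock()
    if exists {
        return resizeVolumeLocked(name, size)
    }
    return run("lvcreate -L " + size + " -n " + name)
}

func main() {
    _ = CreateVolume("devbox-vg/demo", "200M", false)
    _ = CreateVolume("devbox-vg/demo", "300M", true)
}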
This also makes ResizeLVMVolume func @@ -1001,6 +1022,9 @@ func ListLVMLogicalVolume(ctx context.Context) ([]LogicalVolume, error) { // modified by sealos func ListLVMLogicalVolumeByVG(ctx context.Context, vg string, pool string) ([]LogicalVolume, error) { + lvmLock.Lock() + defer lvmLock.Unlock() + if err := ReloadLVMMetadataCache(ctx); err != nil { return nil, err } diff --git a/snapshots/devbox/lvm/lvm_test.go b/snapshots/devbox/lvm/lvm_test.go index e82ce5b88101..6938dccc6eaf 100644 --- a/snapshots/devbox/lvm/lvm_test.go +++ b/snapshots/devbox/lvm/lvm_test.go @@ -18,8 +18,8 @@ package lvm import ( "context" - "os" "fmt" + "os" "os/exec" "strings" "testing" @@ -29,10 +29,437 @@ import ( ) const ( - testVGName = "devbox-vg" - testPoolName = "devbox-vg-thinpool" + testVGName = "devbox-vg" + testPoolName = "devbox-vg-thinpool" + testLockLVName = "test-lock-lv" ) +// TestCreateVolume_WithLock test create LV (with lock) +func TestCreateVolume_WithLock(t *testing.T) { + ctx := context.Background() + + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testLockLVName, + }, + Spec: apis.VolumeInfo{ + Capacity: "209715200", // 200M in bytes (200 * 1024 * 1024) + VolGroup: testVGName, + ThinProvision: testPoolName, + }, + } + + t.Logf("Creating LV: %s with capacity: 200M", testLockLVName) + + // create LV + err := CreateVolume(ctx, vol) + if err != nil { + t.Fatalf("Failed to create volume: %v", err) + } + + exists, err := CheckLVMMetadataExists(ctx, vol) + if err != nil { + t.Fatalf("Failed to check volume exists: %v", err) + } + if !exists { + t.Fatal("Volume should exist after creation") + } + + // check device node + devicePath := fmt.Sprintf("/dev/%s/%s", testVGName, testLockLVName) + if _, err := os.Stat(devicePath); os.IsNotExist(err) { + t.Fatalf("Device node %s does not exist", devicePath) + } + + t.Logf("βœ… LV created successfully: %s", testLockLVName) + t.Logf("πŸ‘‰ Run 'lvs | grep %s' to verify", testLockLVName) +} + +// TestListLVMLogicalVolumeByVG_WithLock test list LV (with lock) +func TestListLVMLogicalVolumeByVG_WithLock(t *testing.T) { + ctx := context.Background() + + t.Logf("Listing LVs in VG: %s", testVGName) + + // list all LVs + lvs, err := ListLVMLogicalVolumeByVG(ctx, testVGName, testPoolName) + if err != nil { + t.Fatalf("Failed to list LVs: %v", err) + } + + t.Logf("Found %d LVs in VG %s", len(lvs), testVGName) + + // find our test LV + found := false + for _, lv := range lvs { + if lv.Name == testLockLVName { + found = true + t.Logf("βœ… Found test LV: %s (Size: %d bytes, VG: %s)", lv.Name, lv.Size, lv.VGName) + break + } + } + + if !found { + t.Logf("⚠️ Test LV %s not found (may not exist yet)", testLockLVName) + } + + t.Logf("πŸ‘‰ Run 'lvs | grep %s' to verify", testLockLVName) +} + +// TestResizeLVMVolume_WithLock test resize LV (with lock) +func TestResizeLVMVolume_WithLock(t *testing.T) { + ctx := context.Background() + + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testLockLVName, + }, + Spec: apis.VolumeInfo{ + Capacity: "314572800", // 300M in bytes (300 * 1024 * 1024) + VolGroup: testVGName, + ThinProvision: testPoolName, + }, + } + + t.Logf("Resizing LV: %s from 200M to 300M", testLockLVName) + + // resize LV + err := ResizeLVMVolume(ctx, vol, false) + if err != nil { + t.Fatalf("Failed to resize volume: %v", err) + } + + // check new size + currentSize, err := getLVSize(ctx, vol) + if err != nil { + t.Fatalf("Failed to get LV size: %v", err) + } + + expectedSize := uint64(300 * 1024 * 1024) // 300M in bytes + if currentSize 
< expectedSize { + t.Fatalf("LV size should be at least %d bytes, got %d bytes", expectedSize, currentSize) + } + + t.Logf("βœ… LV resized successfully: %s (new size: %d bytes)", testLockLVName, currentSize) + t.Logf("πŸ‘‰ Run 'lvs -o lv_name,lv_size | grep %s' to verify", testLockLVName) +} + +// TestDestroyVolume_WithLock test destroy LV (with lock) +func TestDestroyVolume_WithLock(t *testing.T) { + ctx := context.Background() + + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testLockLVName, + }, + Spec: apis.VolumeInfo{ + VolGroup: testVGName, + }, + } + + t.Logf("Destroying LV: %s", testLockLVName) + + // destroy LV + err := DestroyVolume(ctx, vol) + if err != nil { + t.Fatalf("Failed to destroy volume: %v", err) + } + + // check if destroyed successfully + exists, err := CheckLVMMetadataExists(ctx, vol) + if err != nil { + t.Fatalf("Failed to check volume exists: %v", err) + } + if exists { + t.Fatal("Volume should not exist after destruction") + } + + // check device node + devicePath := fmt.Sprintf("/dev/%s/%s", testVGName, testLockLVName) + if _, err := os.Stat(devicePath); !os.IsNotExist(err) { + t.Fatalf("Device node %s should not exist", devicePath) + } + + t.Logf("βœ… LV destroyed successfully: %s", testLockLVName) + t.Logf("πŸ‘‰ Run 'lvs | grep %s' to verify (should not find it)", testLockLVName) +} + +// TestForceDestroyVolume_WithLock test force destroy LV (with lock) +func TestForceDestroyVolume_WithLock(t *testing.T) { + ctx := context.Background() + + // create a LV + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testLockLVName, + }, + Spec: apis.VolumeInfo{ + Capacity: "209715200", // 200M in bytes + VolGroup: testVGName, + ThinProvision: testPoolName, + }, + } + + t.Logf("Creating LV: %s for force destroy test", testLockLVName) + + err := CreateVolume(ctx, vol) + if err != nil { + t.Fatalf("Failed to create volume: %v", err) + } + + t.Logf("Force destroying LV: %s", testLockLVName) + + // force destroy LV + err = ForceDestroyVolume(ctx, vol) + if err != nil { + t.Fatalf("Failed to force destroy volume: %v", err) + } + + // check if destroyed successfully + exists, err := CheckLVMMetadataExists(ctx, vol) + if err != nil { + t.Fatalf("Failed to check volume exists: %v", err) + } + if exists { + t.Fatal("Volume should not exist after force destruction") + } + + t.Logf("βœ… LV force destroyed successfully: %s", testLockLVName) + t.Logf("πŸ‘‰ Run 'lvs | grep %s' to verify (should not find it)", testLockLVName) +} + +// TestLVMFullLifecycle_WithLock test LV full lifecycle (createβ†’listβ†’resizeβ†’delete) +func TestLVMFullLifecycle_WithLock(t *testing.T) { + ctx := context.Background() + + t.Log("========================================") + t.Log("Testing LVM Full Lifecycle with Lock") + t.Log("========================================") + + // 1. create LV + t.Log("\nπŸ“ Step 1: Creating LV...") + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: testLockLVName, + }, + Spec: apis.VolumeInfo{ + Capacity: "209715200", // 200M in bytes + VolGroup: testVGName, + ThinProvision: testPoolName, + }, + } + + err := CreateVolume(ctx, vol) + if err != nil { + t.Fatalf("Step 1 failed: %v", err) + } + + exists, err := CheckLVMMetadataExists(ctx, vol) + if err != nil || !exists { + t.Fatalf("Step 1 verification failed: volume should exist") + } + + t.Logf("βœ… Step 1: LV created successfully: %s (200M)", testLockLVName) + t.Logf("πŸ‘‰ Run 'lvs | grep %s' to verify creation", testLockLVName) + + // 2. 
list LV + t.Log("\nπŸ“ Step 2: Listing LVs...") + lvs, err := ListLVMLogicalVolumeByVG(ctx, testVGName, testPoolName) + if err != nil { + t.Fatalf("Step 2 failed: %v", err) + } + + found := false + for _, lv := range lvs { + if lv.Name == testLockLVName { + found = true + t.Logf("βœ… Step 2: Found LV in list: %s (Size: %d bytes)", lv.Name, lv.Size) + break + } + } + + if !found { + t.Fatalf("Step 2 verification failed: LV not found in list") + } + + // 3. resize LV + t.Log("\nπŸ“ Step 3: Resizing LV from 200M to 400M...") + vol.Spec.Capacity = "419430400" // 400M in bytes (400 * 1024 * 1024) + err = ResizeLVMVolume(ctx, vol, false) + if err != nil { + t.Fatalf("Step 3 failed: %v", err) + } + + currentSize, err := getLVSize(ctx, vol) + if err != nil { + t.Fatalf("Step 3 verification failed: %v", err) + } + + expectedSize := uint64(400 * 1024 * 1024) + if currentSize < expectedSize { + t.Fatalf("Step 3 verification failed: size should be at least %d, got %d", expectedSize, currentSize) + } + + t.Logf("βœ… Step 3: LV resized successfully: %s (400M, actual: %d bytes)", testLockLVName, currentSize) + t.Logf("πŸ‘‰ Run 'lvs -o lv_name,lv_size | grep %s' to verify resize", testLockLVName) + + // 4. list LV again to verify size change + t.Log("\nπŸ“ Step 4: Listing LVs again to verify size change...") + lvs, err = ListLVMLogicalVolumeByVG(ctx, testVGName, testPoolName) + if err != nil { + t.Fatalf("Step 4 failed: %v", err) + } + + found = false + for _, lv := range lvs { + if lv.Name == testLockLVName { + found = true + t.Logf("βœ… Step 4: Verified LV size change: %s (Size: %d bytes)", lv.Name, lv.Size) + break + } + } + + if !found { + t.Fatalf("Step 4 verification failed: LV not found in list") + } + + // 5. destroy LV + t.Log("\nπŸ“ Step 5: Destroying LV...") + err = DestroyVolume(ctx, vol) + if err != nil { + t.Fatalf("Step 5 failed: %v", err) + } + + exists, err = CheckLVMMetadataExists(ctx, vol) + if err != nil || exists { + t.Fatalf("Step 5 verification failed: volume should not exist") + } + + devicePath := fmt.Sprintf("/dev/%s/%s", testVGName, testLockLVName) + if _, err := os.Stat(devicePath); !os.IsNotExist(err) { + t.Fatalf("Step 5 verification failed: device node should not exist") + } + + t.Logf("βœ… Step 5: LV destroyed successfully: %s", testLockLVName) + t.Logf("πŸ‘‰ Run 'lvs | grep %s' to verify deletion (should not find it)", testLockLVName) + + t.Log("\n========================================") + t.Log("βœ… Full Lifecycle Test Completed Successfully!") + t.Log("========================================") +} + +// TestLVMConcurrentOperations_WithLock test concurrent operations (with lock) +func TestLVMConcurrentOperations_WithLock(t *testing.T) { + ctx := context.Background() + + t.Log("========================================") + t.Log("Testing Concurrent LVM Operations") + t.Log("========================================") + + // create multiple LVs + lvNames := []string{ + "test-concurrent-lv-1", + "test-concurrent-lv-2", + "test-concurrent-lv-3", + } + + // cleanup function + cleanup := func() { + for _, name := range lvNames { + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: apis.VolumeInfo{VolGroup: testVGName}, + } + ForceDestroyVolume(ctx, vol) + } + } + + // check if cleanup is done before and after the test + cleanup() + defer cleanup() + + t.Log("\nπŸ“ Creating 3 LVs concurrently...") + + // concurrent create + done := make(chan error, len(lvNames)) + for _, name := range lvNames { + go func(lvName string) { + vol := &apis.LVMVolume{ + 
ObjectMeta: metav1.ObjectMeta{Name: lvName}, + Spec: apis.VolumeInfo{ + Capacity: "104857600", // 100M in bytes (100 * 1024 * 1024) + VolGroup: testVGName, + ThinProvision: testPoolName, + }, + } + done <- CreateVolume(ctx, vol) + }(name) + } + + // wait for all create to complete + for i := 0; i < len(lvNames); i++ { + if err := <-done; err != nil { + t.Fatalf("Concurrent create failed: %v", err) + } + } + + t.Log("βœ… All LVs created successfully") + + // check if all LVs exist + t.Log("\nπŸ“ Verifying all LVs exist...") + for _, name := range lvNames { + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: apis.VolumeInfo{VolGroup: testVGName}, + } + exists, err := CheckLVMMetadataExists(ctx, vol) + if err != nil || !exists { + t.Fatalf("LV %s should exist", name) + } + t.Logf(" βœ“ %s exists", name) + } + + // concurrent delete + t.Log("\nπŸ“ Deleting 3 LVs concurrently...") + for _, name := range lvNames { + go func(lvName string) { + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{Name: lvName}, + Spec: apis.VolumeInfo{VolGroup: testVGName}, + } + done <- DestroyVolume(ctx, vol) + }(name) + } + + // wait for all delete to complete + for i := 0; i < len(lvNames); i++ { + if err := <-done; err != nil { + t.Fatalf("Concurrent delete failed: %v", err) + } + } + + t.Log("βœ… All LVs deleted successfully") + + // check if all LVs are deleted + t.Log("\nπŸ“ Verifying all LVs are deleted...") + for _, name := range lvNames { + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: apis.VolumeInfo{VolGroup: testVGName}, + } + exists, err := CheckLVMMetadataExists(ctx, vol) + if err != nil || exists { + t.Fatalf("LV %s should not exist", name) + } + t.Logf(" βœ“ %s deleted", name) + } + + t.Log("\n========================================") + t.Log("βœ… Concurrent Operations Test Completed!") + t.Log(" All operations were serialized by the lock") + t.Log("========================================") +} + // TestForceDestroyVolume_NormalLV tests force destroying a normal LV func TestForceDestroyVolume_NormalLV(t *testing.T) { From 90a4939c740d073e985ecce4e6e96938e36ea50c Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Wed, 31 Dec 2025 08:03:33 +0000 Subject: [PATCH 3/7] feat: lvm lock --- snapshots/devbox/devbox.go | 26 ++--------- snapshots/devbox/lvm/lvm.go | 89 +++++++++++++++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 23 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 5b4698fad181..c1feaf65c4c6 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -721,36 +721,16 @@ func (o *Snapshotter) mkfs(lvName string) error { } func (o *Snapshotter) mountLvm(ctx context.Context, lvName string, path string) error { - _, err := os.Stat(path) - if os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return fmt.Errorf("failed to create directory %s: %w", path, err) - } - } else if err != nil { - return fmt.Errorf("failed to stat path %s: %w", path, err) - } devicePath := fmt.Sprintf("/dev/%s/%s", o.lvmVgName, lvName) - err = syscall.Mount(devicePath, path, "ext4", 0, "") - if err != nil { + if err := lvm.MountVolume(devicePath, path, "ext4", 0, ""); err != nil { return fmt.Errorf("failed to mount LVM logical volume %s to %s: %w", devicePath, path, err) } return nil } +// unmountLvm unmounts the LVM logical volume func (o *Snapshotter) unmountLvm(ctx context.Context, path string) error { - isMounted, err := isMountPoint(path) - if err != nil { 
- return fmt.Errorf("failed to check if path %s is a mount point: %w", path, err) - } - if !isMounted { - log.G(ctx).Infof("Path %s is not mounted, skipping unmount", path) - return nil - } - err = syscall.Unmount(path, 0) - if err != nil { - return fmt.Errorf("failed to unmount path %s: %w", path, err) - } - return nil + return lvm.UnmountVolume(path) } // end modified by sealos diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index ab379b4aa07d..567cd93c9768 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -436,6 +436,95 @@ func ForceDestroyVolume(ctx context.Context, vol *apis.LVMVolume) error { return nil } +// MountVolume mounts an LVM logical volume to the specified path +// This function is protected by the global LVM lock to prevent concurrent operations +func MountVolume(devicePath, mountPath, fsType string, flags uintptr, options string) error { + lvmLock.Lock() + defer lvmLock.Unlock() + + // check if the mount path exists, if not create it + if _, err := os.Stat(mountPath); os.IsNotExist(err) { + if err := os.MkdirAll(mountPath, 0755); err != nil { + return fmt.Errorf("failed to create mount directory %s: %w", mountPath, err) + } + } else if err != nil { + // handle other errors (e.g., permission denied, IO error, etc.) + return fmt.Errorf("failed to stat mount path %s: %w", mountPath, err) + } + + // check if the device exists + if _, err := os.Stat(devicePath); os.IsNotExist(err) { + return fmt.Errorf("device %s does not exist: %w", devicePath, err) + } else if err != nil { + // handle other errors (e.g., permission denied, IO error, etc.) + return fmt.Errorf("failed to stat device %s: %w", devicePath, err) + } + + // execute mount + if err := syscall.Mount(devicePath, mountPath, fsType, flags, options); err != nil { + return fmt.Errorf("failed to mount %s to %s: %w", devicePath, mountPath, err) + } + + klog.Infof("lvm: successfully mounted %s to %s", devicePath, mountPath) + return nil +} + +// UnmountVolume unmounts a path +// This function is protected by the global LVM lock to prevent concurrent operations +// It uses the same lock as LVM operations because unmount often happens alongside +// LVM operations (resize, remove) and they may conflict on the same device +func UnmountVolume(mountPath string) error { + lvmLock.Lock() + defer lvmLock.Unlock() + + // check if the path is a mount point + isMounted, err := isMountPoint(mountPath) + if err != nil { + return fmt.Errorf("failed to check if %s is a mount point: %w", mountPath, err) + } + + if !isMounted { + klog.Infof("lvm: path %s is not a mount point, skipping unmount", mountPath) + return nil + } + + // unmount + if err := syscall.Unmount(mountPath, 0); err != nil { + klog.Warningf("lvm: failed to unmount %s: %v", mountPath, err) + return fmt.Errorf("failed to unmount %s: %w", mountPath, err) + } + + klog.Infof("lvm: successfully unmounted %s", mountPath) + return nil +} + +// isMountPoint checks if a directory is a mount point +func isMountPoint(dir string) (bool, error) { + // check if the directory exists + if _, err := os.Stat(dir); os.IsNotExist(err) { + return false, nil + } + + // get directory information + dirStat, err := os.Stat(dir) + if err != nil { + return false, fmt.Errorf("failed to stat %s: %w", dir, err) + } + + // get parent directory information + parentDir := filepath.Dir(dir) + parentStat, err := os.Stat(parentDir) + if err != nil { + return false, fmt.Errorf("failed to stat parent %s: %w", parentDir, err) + } + + // if the directory and parent 
directory have different device numbers, it is a mount point + dirDev := dirStat.Sys().(*syscall.Stat_t).Dev + parentDev := parentStat.Sys().(*syscall.Stat_t).Dev + + return dirDev != parentDev, nil +} + // CheckLVMMetadataExists checks if the lvm volume exists in metadata func CheckLVMMetadataExists(ctx context.Context, vol *apis.LVMVolume) (bool, error) { // reload lvm metadata cache to ensure the metadata is up to date From 3a410c8a3e0ba19c5dffafe2cf74a5bc71e8a407 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Wed, 31 Dec 2025 09:29:28 +0000 Subject: [PATCH 4/7] feat: lvm lock --- snapshots/devbox/devbox.go | 11 ++++++++--- snapshots/devbox/lvm/lvm.go | 4 ++-- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index c1feaf65c4c6..4dfb711bf571 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -366,7 +366,7 @@ func (o *Snapshotter) Commit(ctx context.Context, name, key string, opts ...snap } func (o *Snapshotter) RemoveDir(ctx context.Context, dir string) { - isMounted, err := isMountPoint(dir) + isMounted, err := lvm.IsMountPoint(dir) if err != nil { log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to check if path is a mount point") return @@ -723,6 +723,7 @@ func (o *Snapshotter) mkfs(lvName string) error { func (o *Snapshotter) mountLvm(ctx context.Context, lvName string, path string) error { devicePath := fmt.Sprintf("/dev/%s/%s", o.lvmVgName, lvName) if err := lvm.MountVolume(devicePath, path, "ext4", 0, ""); err != nil { + log.G(ctx).WithError(err).WithField("devicePath", devicePath).WithField("path", path).Warn("failed to mount LVM logical volume") return fmt.Errorf("failed to mount LVM logical volume %s to %s: %w", devicePath, path, err) } return nil @@ -730,7 +731,11 @@ func (o *Snapshotter) mountLvm(ctx context.Context, lvName string, path string) // unmountLvm unmounts the LVM logical volume func (o *Snapshotter) unmountLvm(ctx context.Context, path string) error { - return lvm.UnmountVolume(path) + if err := lvm.UnmountVolume(path); err != nil { + log.G(ctx).WithError(err).WithField("path", path).Warn("failed to unmount LVM logical volume") + return fmt.Errorf("failed to unmount LVM logical volume %s: %w", path, err) + } + return nil } // end modified by sealos @@ -795,7 +800,7 @@ func (o *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, k // mount point for the snapshot log.G(ctx).Debug("LVM logical volume name found for content ID:", contentID, "is", lvName) var isMounted bool - if isMounted, err = isMountPoint(npath); err != nil { + if isMounted, err = lvm.IsMountPoint(npath); err != nil { return fmt.Errorf("failed to check if path is a mount point: %w", err) } else if isMounted { log.G(ctx).Infof("Path %s is already mounted, skipping mount", npath) diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index 567cd93c9768..d542d4eb6dc3 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -478,7 +478,7 @@ func UnmountVolume(mountPath string) error { defer lvmLock.Unlock() // check if the path is a mount point - isMounted, err := isMountPoint(mountPath) + isMounted, err := IsMountPoint(mountPath) if err != nil { return fmt.Errorf("failed to check if %s is a mount point: %w", mountPath, err) } @@ -499,7 +499,7 @@ func UnmountVolume(mountPath string) error { } // isMountPoint checks if a directory is a mount point -func isMountPoint(dir string) (bool, error) { +func IsMountPoint(dir string) 
(bool, error) { // check if the directory exists if _, err := os.Stat(dir); os.IsNotExist(err) { return false, nil From 7899ae1461f406b5d7766994369859cf2fa13874 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Mon, 5 Jan 2026 07:40:13 +0000 Subject: [PATCH 5/7] add lv lock to mount operation --- snapshots/devbox/lvm/lvm.go | 54 +++++++++++ snapshots/devbox/lvm/lvm_test.go | 155 +++++++++++++++++++++++++++++++ 2 files changed, 209 insertions(+) diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index d542d4eb6dc3..d7469fba39f6 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -498,6 +498,60 @@ func UnmountVolume(mountPath string) error { return nil } +// FindMountPointByDevice finds the mount point for a given device path by reading /proc/mounts +// Returns the mount point path if found, empty string if not mounted, and error on failure +func FindMountPointByDevice(devicePath string) (string, error) { + lvmLock.Lock() + defer lvmLock.Unlock() + + data, err := os.ReadFile("/proc/mounts") + if err != nil { + return "", fmt.Errorf("failed to read /proc/mounts: %w", err) + } + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if len(line) == 0 { + continue + } + + fields := strings.Fields(line) + if len(fields) < 2 { + continue + } + + // fields[0] is the device path, fields[1] is the mount point + mountDevice := fields[0] + mountPoint := fields[1] + + // Check if the device matches (handle both direct path and symlink resolution) + if mountDevice == devicePath { + return mountPoint, nil + } + + // Resolve both paths and compare + resolvedDevicePath, err1 := filepath.EvalSymlinks(devicePath) + resolvedMountDevice, err2 := filepath.EvalSymlinks(mountDevice) + + // If both resolve successfully, compare resolved paths + if err1 == nil && err2 == nil { + if resolvedDevicePath == resolvedMountDevice { + return mountPoint, nil + } + } + + // Also check if one resolves to the other + if err1 == nil && resolvedDevicePath == mountDevice { + return mountPoint, nil + } + if err2 == nil && resolvedMountDevice == devicePath { + return mountPoint, nil + } + } + + return "", nil +} + // isMountPoint checks if a directory is a mount point func IsMountPoint(dir string) (bool, error) { // check if the directory exists diff --git a/snapshots/devbox/lvm/lvm_test.go b/snapshots/devbox/lvm/lvm_test.go index 6938dccc6eaf..578184cbf15e 100644 --- a/snapshots/devbox/lvm/lvm_test.go +++ b/snapshots/devbox/lvm/lvm_test.go @@ -713,3 +713,158 @@ func TestForceDestroyVolume_Idempotent(t *testing.T) { t.Fatalf("Force destroy should be idempotent: %v", err) } } + +// TestIsMountPoint tests the IsMountPoint function +func TestIsMountPoint(t *testing.T) { + // Test 1: Check a known mount point (e.g., /proc) + // /proc is typically always mounted + isMounted, err := IsMountPoint("/proc") + if err != nil { + t.Fatalf("Failed to check /proc mount point: %v", err) + } + if !isMounted { + t.Logf("Warning: /proc is not detected as a mount point (this might be normal in some environments)") + } else { + t.Logf("Successfully detected /proc as a mount point") + } + + // Test 2: Check a regular directory (should not be a mount point) + // Use /tmp as it's typically not a mount point (unless specifically mounted) + tmpDir := "/tmp" + isMounted, err = IsMountPoint(tmpDir) + if err != nil { + t.Fatalf("Failed to check /tmp mount point: %v", err) + } + t.Logf("/tmp is mounted: %v", isMounted) + + // Test 3: Check a non-existent directory + 
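FindMountPointByDevice, added earlier in this patch, calls filepath.EvalSymlinks on devicePath once per /proc/mounts line. A compressed sketch of the same lookup with the resolution hoisted out of the loop, assuming the usual "device mountpoint fstype options dump pass" field order of /proc/mounts; mountPointsForDevice and resolveOrSelf are hypothetical names, not part of the patch:

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// resolveOrSelf resolves symlinks and falls back to the input on error,
// so /dev/<vg>/<lv> and its /dev/dm-N target compare equal.
func resolveOrSelf(p string) string {
    if r, err := filepath.EvalSymlinks(p); err == nil {
        return r
    }
    return p
}

// mountPointsForDevice returns every mount point whose source device matches
// devicePath, resolving the device symlink once instead of once per line.
func mountPointsForDevice(devicePath string) ([]string, error) {
    data, err := os.ReadFile("/proc/mounts")
    if err != nil {
        return nil, fmt.Errorf("failed to read /proc/mounts: %w", err)
    }
    want := resolveOrSelf(devicePath)
    var mountPoints []string
    for _, line := range strings.Split(string(data), "\n") {
        fields := strings.Fields(line)
        if len(fields) < 2 {
            continue
        }
        // fields[0] is the source device, fields[1] is the mount point.
        if fields[0] == devicePath || resolveOrSelf(fields[0]) == want {
            mountPoints = append(mountPoints, fields[1])
        }
    }
    return mountPoints, nil
}

func main() {
    mps, err := mountPointsForDevice("/dev/devbox-vg/test-lock-lv")
    fmt.Println(mps, err)
}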
nonExistentDir := "/nonexistent/directory/path" + isMounted, err = IsMountPoint(nonExistentDir) + if err != nil { + t.Fatalf("Failed to check non-existent directory: %v", err) + } + if isMounted { + t.Errorf("Non-existent directory should not be detected as a mount point") + } + t.Logf("Non-existent directory is mounted: %v (expected: false)", isMounted) + + // Test 4: Check /sys (another known mount point) + isMounted, err = IsMountPoint("/sys") + if err != nil { + t.Fatalf("Failed to check /sys mount point: %v", err) + } + if !isMounted { + t.Logf("Warning: /sys is not detected as a mount point (this might be normal in some environments)") + } else { + t.Logf("Successfully detected /sys as a mount point") + } + + // Test 5: Check root directory (should not be a mount point by definition) + isMounted, err = IsMountPoint("/var/lib/containerd/io.containerd.snapshotter.v1.devbox/snapshots/7301") + if err != nil { + t.Fatalf("Failed to check root directory: %v", err) + } + // Root directory's parent is itself, so it should not be detected as a mount point + // (unless it's in a chroot environment, but that's rare) + t.Logf("Root directory /var/lib/containerd/io.containerd.snapshotter.v1.devbox/snapshots/7301 is mounted: %v", isMounted) +} + +// isMountPointByProcMounts checks if a directory is a mount point by reading /proc/mounts +// This is the implementation from devbox.go for comparison +func isMountPointByProcMounts(dir string) (bool, error) { + data, err := os.ReadFile("/proc/mounts") + if err != nil { + return false, fmt.Errorf("failed to read /proc/mounts: %w", err) + } + + lines := strings.Split(string(data), "\n") + for _, line := range lines { + if len(line) == 0 { + continue + } + + fields := strings.Fields(line) + if len(fields) < 2 { + continue + } + + mountPoint := fields[1] + if mountPoint == dir { + return true, nil + } + } + + return false, nil +} + +// BenchmarkIsMountPoint_DeviceNumber benchmarks the device number comparison method +func BenchmarkIsMountPoint_DeviceNumber(b *testing.B) { + testPaths := []string{"/proc", "/sys", "/tmp", "/", "/nonexistent"} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, path := range testPaths { + _, _ = IsMountPoint(path) + } + } +} + +// BenchmarkIsMountPoint_ProcMounts benchmarks the /proc/mounts reading method +func BenchmarkIsMountPoint_ProcMounts(b *testing.B) { + testPaths := []string{"/proc", "/sys", "/tmp", "/", "/nonexistent"} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, path := range testPaths { + _, _ = isMountPointByProcMounts(path) + } + } +} + +// BenchmarkIsMountPoint_Single_DeviceNumber benchmarks single check with device number method +func BenchmarkIsMountPoint_Single_DeviceNumber(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = IsMountPoint("/proc") + } +} + +// BenchmarkIsMountPoint_Single_ProcMounts benchmarks single check with /proc/mounts method +func BenchmarkIsMountPoint_Single_ProcMounts(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = isMountPointByProcMounts("/proc") + } +} + +// TestIsMountPoint_PerformanceComparison compares the performance of both methods +func TestIsMountPoint_PerformanceComparison(t *testing.T) { + testPaths := []string{"/proc", "/sys", "/tmp", "/", "/nonexistent"} + + // Test both methods return the same results + for _, path := range testPaths { + result1, err1 := IsMountPoint(path) + result2, err2 := isMountPointByProcMounts(path) + + if err1 != nil && err2 != nil { + // Both failed, that's okay for some paths + continue + 
} + + if err1 != nil { + t.Logf("Device number method failed for %s: %v", path, err1) + continue + } + + if err2 != nil { + t.Logf("ProcMounts method failed for %s: %v", path, err2) + continue + } + + if result1 != result2 { + t.Logf("Warning: Different results for %s - DeviceNumber: %v, ProcMounts: %v", path, result1, result2) + } else { + t.Logf("Path %s: both methods agree - mounted: %v", path, result1) + } + } +} From 11a5db6d03aa3b861e97427ac7648dc2c5736cc2 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Tue, 6 Jan 2026 09:56:40 +0000 Subject: [PATCH 6/7] add lv lock in cleanup operation --- snapshots/devbox/devbox.go | 73 +------------- snapshots/devbox/devbox_test.go | 28 +++--- snapshots/devbox/lvm/lvm.go | 23 +++-- snapshots/devbox/lvm/lvm_test.go | 161 +++++++++++++++++++++++++++++++ 4 files changed, 190 insertions(+), 95 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 4dfb711bf571..88186b94cb96 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -511,7 +511,7 @@ func (o *Snapshotter) cleanupDirectories(ctx context.Context) (_ []string, _ []s // Unmount any mounted LVs for _, lvName := range removedLvNames { devicePath := fmt.Sprintf("/dev/%s/%s", o.lvmVgName, lvName) - mountPoints, err := findMountPointByDevice(devicePath) + mountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { log.G(ctx).WithError(err).WithField("lvName", lvName).WithField("devicePath", devicePath). Warn("Cleanup: failed to find mount point for LV, continuing") @@ -639,55 +639,6 @@ func readProcMounts() ([][]string, error) { return mounts, nil } -// findMountPointByDevice finds the mount point for a given device path by reading /proc/mounts -// Returns the mount point path if found, empty string if not mounted, and error on failure -func findMountPointByDevice(devicePath string) ([]string, error) { - mounts, err := readProcMounts() - if err != nil { - return nil, err - } - - var mountPoints []string - for _, fields := range mounts { - if len(fields) < 2 { - continue - } - - mountDevice := fields[0] - mountPoint := fields[1] - - // Check if the device matches (handle both direct path and symlink resolution) - if mountDevice == devicePath { - mountPoints = append(mountPoints, mountPoint) - continue - } - - // Resolve both paths and compare - resolvedDevicePath, err1 := filepath.EvalSymlinks(devicePath) - resolvedMountDevice, err2 := filepath.EvalSymlinks(mountDevice) - - // If both resolve successfully, compare resolved paths - if err1 == nil && err2 == nil { - if resolvedDevicePath == resolvedMountDevice { - mountPoints = append(mountPoints, mountPoint) - continue - } - } - - // Also check if one resolves to the other - if err1 == nil && resolvedDevicePath == mountDevice { - mountPoints = append(mountPoints, mountPoint) - continue - } - if err2 == nil && resolvedMountDevice == devicePath { - mountPoints = append(mountPoints, mountPoint) - continue - } - } - - return mountPoints, nil -} - func isMountPoint(dir string) (bool, error) { mounts, err := readProcMounts() if err != nil { @@ -1050,45 +1001,23 @@ func (o *Snapshotter) prepareLvmDirectory(ctx context.Context, snapshotDir strin log.G(ctx).Debug("Creating LVM volume:", lvName, "with capacity:", capacity, "in volume group:", o.lvmVgName) err = lvm.CreateVolume(ctx, vol) if err != nil { - // If create fails, we should remove the LVM logical volume - if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { - log.G(ctx).WithError(err1).WithField("lvName", 
lvName).Warn("failed to force destroy LVM logical volume after create failure") - } return td, lvName, fmt.Errorf("failed to create LVM logical volume %s: %w", lvName, err) } if err = o.mkfs(lvName); err != nil { - // If mkfs fails, we should remove the LVM logical volume - if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { - log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after mkfs failure") - } return td, lvName, fmt.Errorf("failed to create filesystem on LVM logical volume %s: %w", lvName, err) } mounted = true if err = o.mountLvm(ctx, lvName, td); err != nil { - // If mount fails, we should remove the LVM logical volume - if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { - log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after mount failure") - } return td, lvName, fmt.Errorf("failed to mount LVM logical volume %s: %w", lvName, err) } if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil { - // If fs dir creation fails, we should unmount the LVM logical volume and force destroy it - o.unmountLvm(ctx, td) - if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { - log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after fs dir creation failure") - } return td, lvName, fmt.Errorf("failed to create fs directory: %w", err) } if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil { - // If work dir creation fails, we should unmount the LVM logical volume and force destroy it - o.unmountLvm(ctx, td) - if err1 := o.forceRemoveLv(ctx, lvName); err1 != nil { - log.G(ctx).WithError(err1).WithField("lvName", lvName).Warn("failed to force destroy LVM logical volume after work dir creation failure") - } return td, lvName, fmt.Errorf("failed to create work directory: %w", err) } diff --git a/snapshots/devbox/devbox_test.go b/snapshots/devbox/devbox_test.go index 2e678ed0977f..ccad124ea877 100644 --- a/snapshots/devbox/devbox_test.go +++ b/snapshots/devbox/devbox_test.go @@ -125,7 +125,7 @@ func TestFindMountPointAndUnmount(t *testing.T) { // Step 5: Test findMountPointByDevice t.Logf("Step 5: Testing findMountPointByDevice for %s", devicePath) - foundMountPoints, err := findMountPointByDevice(devicePath) + foundMountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed: %v", err) } @@ -150,7 +150,7 @@ func TestFindMountPointAndUnmount(t *testing.T) { // Step 7: Verify the mount point is no longer mounted t.Logf("Step 7: Verifying mount point is no longer mounted") - foundMountPoints, err = findMountPointByDevice(devicePath) + foundMountPoints, err = lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed after unmount: %v", err) } @@ -194,7 +194,7 @@ func TestFindMountPointByDevice_UnmountedDevice(t *testing.T) { devicePath := fmt.Sprintf("/dev/%s/%s", testVGName, lvName) // Test findMountPointByDevice on an unmounted device - mountPoints, err := findMountPointByDevice(devicePath) + mountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed: %v", err) } @@ -307,7 +307,7 @@ func TestFindMountPointAndUnmount_Concurrent(t *testing.T) { }() // Step 5: Test findMountPointByDevice (concurrent access) - foundMountPoints, err := findMountPointByDevice(devicePath) + foundMountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { errorChan <- 
fmt.Errorf("goroutine %d: findMountPointByDevice failed: %w", index, err) return @@ -333,7 +333,7 @@ func TestFindMountPointAndUnmount_Concurrent(t *testing.T) { mounted = false // Mark as unmounted so defer doesn't try again // Step 7: Verify the mount point is no longer mounted - foundMountPoints, err = findMountPointByDevice(devicePath) + foundMountPoints, err = lvm.FindMountPointByDevice(devicePath) if err != nil { errorChan <- fmt.Errorf("goroutine %d: findMountPointByDevice failed after unmount: %w", index, err) return @@ -661,7 +661,7 @@ func TestFindMountPointByDevice(t *testing.T) { // Step 2: Test findMountPointByDevice on unmounted device (should return empty) t.Logf("Step 2: Testing findMountPointByDevice on unmounted device") - mountPoints, err := findMountPointByDevice(devicePath) + mountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed: %v", err) } @@ -704,7 +704,7 @@ func TestFindMountPointByDevice(t *testing.T) { // Step 6: Test findMountPointByDevice on mounted device t.Logf("Step 6: Testing findMountPointByDevice on mounted device") - mountPoints, err = findMountPointByDevice(devicePath) + mountPoints, err = lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed: %v", err) } @@ -724,7 +724,7 @@ func TestFindMountPointByDevice(t *testing.T) { t.Fatalf("Failed to unmount %s: %v", expectedMountPoint, err) } - mountPoints, err = findMountPointByDevice(devicePath) + mountPoints, err = lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed after unmount: %v", err) } @@ -813,7 +813,7 @@ func TestFindMountPointByDevice_MultipleMountPoints(t *testing.T) { mountedPoints = append(mountedPoints, mountPoint) // Verify it's mounted after each mount - mountPoints, err := findMountPointByDevice(devicePath) + mountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed after mounting to %s: %v", mountPoint, err) } @@ -833,7 +833,7 @@ func TestFindMountPointByDevice_MultipleMountPoints(t *testing.T) { // Step 6: Test findMountPointByDevice finds all mount points t.Logf("Step 6: Testing findMountPointByDevice finds all %d mount points", numMountPoints) - foundMountPoints, err := findMountPointByDevice(devicePath) + foundMountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed: %v", err) } @@ -951,7 +951,7 @@ func TestCleanupUnmountAllMountPoints(t *testing.T) { // Step 5: Verify all mount points are mounted t.Logf("Step 5: Verifying all %d mount points are mounted", numMountPoints) - foundMountPoints, err := findMountPointByDevice(devicePath) + foundMountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed: %v", err) } @@ -967,7 +967,7 @@ func TestCleanupUnmountAllMountPoints(t *testing.T) { // Simulate the cleanup logic from cleanupDirectories for _, lvNameToCleanup := range removedLvNames { devicePathToCheck := fmt.Sprintf("/dev/%s/%s", snapshotter.lvmVgName, lvNameToCleanup) - mountPointsToUnmount, err := findMountPointByDevice(devicePathToCheck) + mountPointsToUnmount, err := lvm.FindMountPointByDevice(devicePathToCheck) if err != nil { t.Fatalf("Failed to find mount points for LV %s: %v", lvNameToCleanup, err) } @@ -992,7 +992,7 @@ func TestCleanupUnmountAllMountPoints(t *testing.T) { // Step 7: Verify all mount points are unmounted t.Logf("Step 7: Verifying all mount points are 
unmounted") - foundMountPoints, err = findMountPointByDevice(devicePath) + foundMountPoints, err = lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed after unmount: %v", err) } @@ -1004,7 +1004,7 @@ func TestCleanupUnmountAllMountPoints(t *testing.T) { // Step 8: Verify device can be removed (no mount points should allow removal) t.Logf("Step 8: Verifying device has no mount points (can be removed)") - remainingMountPoints, err := findMountPointByDevice(devicePath) + remainingMountPoints, err := lvm.FindMountPointByDevice(devicePath) if err != nil { t.Fatalf("findMountPointByDevice failed: %v", err) } diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index d7469fba39f6..f86691fc3b94 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -498,17 +498,18 @@ func UnmountVolume(mountPath string) error { return nil } -// FindMountPointByDevice finds the mount point for a given device path by reading /proc/mounts -// Returns the mount point path if found, empty string if not mounted, and error on failure -func FindMountPointByDevice(devicePath string) (string, error) { +// FindMountPointByDevice finds all mount points for a given device path by reading /proc/mounts +// Returns a slice of mount point paths if found, empty slice if not mounted, and error on failure +func FindMountPointByDevice(devicePath string) ([]string, error) { lvmLock.Lock() defer lvmLock.Unlock() data, err := os.ReadFile("/proc/mounts") if err != nil { - return "", fmt.Errorf("failed to read /proc/mounts: %w", err) + return nil, fmt.Errorf("failed to read /proc/mounts: %w", err) } + var mountPoints []string lines := strings.Split(string(data), "\n") for _, line := range lines { if len(line) == 0 { @@ -526,7 +527,8 @@ func FindMountPointByDevice(devicePath string) (string, error) { // Check if the device matches (handle both direct path and symlink resolution) if mountDevice == devicePath { - return mountPoint, nil + mountPoints = append(mountPoints, mountPoint) + continue } // Resolve both paths and compare @@ -536,20 +538,23 @@ func FindMountPointByDevice(devicePath string) (string, error) { // If both resolve successfully, compare resolved paths if err1 == nil && err2 == nil { if resolvedDevicePath == resolvedMountDevice { - return mountPoint, nil + mountPoints = append(mountPoints, mountPoint) + continue } } // Also check if one resolves to the other if err1 == nil && resolvedDevicePath == mountDevice { - return mountPoint, nil + mountPoints = append(mountPoints, mountPoint) + continue } if err2 == nil && resolvedMountDevice == devicePath { - return mountPoint, nil + mountPoints = append(mountPoints, mountPoint) + continue } } - return "", nil + return mountPoints, nil } // isMountPoint checks if a directory is a mount point diff --git a/snapshots/devbox/lvm/lvm_test.go b/snapshots/devbox/lvm/lvm_test.go index 578184cbf15e..f006aebcccb9 100644 --- a/snapshots/devbox/lvm/lvm_test.go +++ b/snapshots/devbox/lvm/lvm_test.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "os/exec" + "path/filepath" "strings" "testing" @@ -868,3 +869,163 @@ func TestIsMountPoint_PerformanceComparison(t *testing.T) { } } } + +// TestFindMountPointByDevice_MultipleMountPoints tests that FindMountPointByDevice can find all mount points +func TestFindMountPointByDevice_MultipleMountPoints(t *testing.T) { + ctx := context.Background() + + // Create test LV + lvName := "test-find-mount-multiple" + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: lvName, + 
}, + Spec: apis.VolumeInfo{ + Capacity: "104857600", // 100M in bytes + VolGroup: testVGName, + ThinProvision: testPoolName, + }, + } + + // Create LV + if err := CreateVolume(ctx, vol); err != nil { + t.Fatalf("Failed to create LV: %v", err) + } + + // Ensure cleanup + defer func() { + if err := DestroyVolume(ctx, vol); err != nil { + t.Logf("Warning: Failed to cleanup LV %s: %v", lvName, err) + } + }() + + devicePath := fmt.Sprintf("/dev/%s/%s", testVGName, lvName) + + // Format the device + mkfsCmd := exec.Command("mkfs.ext4", "-F", devicePath) + if output, err := mkfsCmd.CombinedOutput(); err != nil { + t.Fatalf("Failed to format device: %v, output: %s", err, output) + } + + // Create multiple mount points + mountPaths := []string{ + "/tmp/test-mount-1", + "/tmp/test-mount-2", + "/tmp/test-mount-3", + } + + // Cleanup function for mount points + cleanupMounts := func() { + for _, mountPath := range mountPaths { + // Unmount + _ = UnmountVolume(mountPath) + // Remove directory + _ = os.RemoveAll(mountPath) + } + } + defer cleanupMounts() + + // Mount the device to multiple locations + for _, mountPath := range mountPaths { + if err := os.MkdirAll(mountPath, 0755); err != nil { + t.Fatalf("Failed to create mount directory %s: %v", mountPath, err) + } + + if err := MountVolume(devicePath, mountPath, "ext4", 0, ""); err != nil { + t.Fatalf("Failed to mount %s to %s: %v", devicePath, mountPath, err) + } + t.Logf("Successfully mounted %s to %s", devicePath, mountPath) + } + + // Test FindMountPointByDevice + foundMountPoints, err := FindMountPointByDevice(devicePath) + if err != nil { + t.Fatalf("FindMountPointByDevice failed: %v", err) + } + + t.Logf("Found %d mount points: %v", len(foundMountPoints), foundMountPoints) + + // Verify all mount points were found + if len(foundMountPoints) != len(mountPaths) { + t.Errorf("Expected to find %d mount points, but found %d: %v", len(mountPaths), len(foundMountPoints), foundMountPoints) + } + + // Check that all expected mount points are in the result + foundMap := make(map[string]bool) + for _, mp := range foundMountPoints { + foundMap[mp] = true + } + + for _, expectedPath := range mountPaths { + if !foundMap[expectedPath] { + t.Errorf("Expected mount point %s not found in results", expectedPath) + } + } + + // Test with symlink path + symlinkPath, err := filepath.EvalSymlinks(devicePath) + if err != nil { + t.Logf("Warning: Failed to resolve symlink for %s: %v", devicePath, err) + } else if symlinkPath != devicePath { + t.Logf("Testing with symlink path: %s", symlinkPath) + foundMountPoints2, err := FindMountPointByDevice(symlinkPath) + if err != nil { + t.Fatalf("FindMountPointByDevice with symlink failed: %v", err) + } + + if len(foundMountPoints2) != len(mountPaths) { + t.Errorf("Expected to find %d mount points with symlink, but found %d: %v", len(mountPaths), len(foundMountPoints2), foundMountPoints2) + } + } +} + +// TestFindMountPointByDevice_NoMountPoints tests FindMountPointByDevice with unmounted device +func TestFindMountPointByDevice_NoMountPoints(t *testing.T) { + ctx := context.Background() + + // Create test LV + lvName := "test-find-mount-none" + vol := &apis.LVMVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: lvName, + }, + Spec: apis.VolumeInfo{ + Capacity: "104857600", // 100M in bytes + VolGroup: testVGName, + ThinProvision: testPoolName, + }, + } + + // Create LV + if err := CreateVolume(ctx, vol); err != nil { + t.Fatalf("Failed to create LV: %v", err) + } + + // Ensure cleanup + defer func() { + if err := DestroyVolume(ctx, 
vol); err != nil { + t.Logf("Warning: Failed to cleanup LV %s: %v", lvName, err) + } + }() + + devicePath := fmt.Sprintf("/dev/%s/%s", testVGName, lvName) + + // Format the device + mkfsCmd := exec.Command("mkfs.ext4", "-F", devicePath) + if output, err := mkfsCmd.CombinedOutput(); err != nil { + t.Fatalf("Failed to format device: %v, output: %s", err, output) + } + + // Test FindMountPointByDevice on unmounted device + foundMountPoints, err := FindMountPointByDevice(devicePath) + if err != nil { + t.Fatalf("FindMountPointByDevice failed: %v", err) + } + + t.Logf("Found %d mount points for unmounted device: %v", len(foundMountPoints), foundMountPoints) + + // Should return empty slice for unmounted device + if len(foundMountPoints) != 0 { + t.Errorf("Expected 0 mount points for unmounted device, but found %d: %v", len(foundMountPoints), foundMountPoints) + } +} From 2c8273f4e42d1fd91a72d75ded1b624ca3f903d0 Mon Sep 17 00:00:00 2001 From: Zllinc <2965202581@qq.com> Date: Tue, 13 Jan 2026 07:16:16 +0000 Subject: [PATCH 7/7] add lv lock --- snapshots/devbox/devbox.go | 29 ++++++++--- snapshots/devbox/lvm/lvm.go | 101 ++++++++++++++++++++++++------------ 2 files changed, 89 insertions(+), 41 deletions(-) diff --git a/snapshots/devbox/devbox.go b/snapshots/devbox/devbox.go index 88186b94cb96..cfde39885d10 100644 --- a/snapshots/devbox/devbox.go +++ b/snapshots/devbox/devbox.go @@ -366,13 +366,17 @@ func (o *Snapshotter) Commit(ctx context.Context, name, key string, opts ...snap } func (o *Snapshotter) RemoveDir(ctx context.Context, dir string) { - isMounted, err := lvm.IsMountPoint(dir) + // Hold lock to ensure atomicity between check and use (prevent TOCTOU race condition) + lvm.LockLV() + defer lvm.UnlockLV() + + isMounted, err := lvm.IsMountPointInternal(dir) if err != nil { log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to check if path is a mount point") return } if isMounted { - if err1 := o.unmountLvm(ctx, dir); err1 != nil { + if err1 := lvm.UnmountVolumeInternal(dir); err1 != nil { log.G(ctx).WithError(err1).WithField("path", dir).Warn("failed to unmount directory") return } @@ -395,6 +399,7 @@ func (o *Snapshotter) Remove(ctx context.Context, key string) (err error) { var ( removals []string removedLvNames []string + mountPath string ) log.G(ctx).Infof("Remove called with key: %s", key) @@ -402,6 +407,11 @@ func (o *Snapshotter) Remove(ctx context.Context, key string) (err error) { // return error since the transaction is committed with the removal // key no longer available. 
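The RemoveDir change above holds the LVM lock across the mount-point check and the unmount, so nothing can remount or tear down the volume between the two steps. A minimal sketch of that check-then-act shape under a caller-held lock; the names mirror the helpers added in this patch series, but the bodies here are illustrative stubs rather than the real stat/umount logic:

package main

import (
    "fmt"
    "sync"
)

var lvmLock sync.RWMutex

// LockLV / UnlockLV expose the write lock to callers that need several
// LVM-related steps to happen atomically, as RemoveDir does in this patch.
func LockLV()   { lvmLock.Lock() }
func UnlockLV() { lvmLock.Unlock() }

// The *Internal variants do the work but take no lock themselves; the caller
// must already hold lvmLock. Stub bodies stand in for the real implementations.
func IsMountPointInternal(dir string) (bool, error) { return true, nil }

func UnmountVolumeInternal(dir string) error {
    fmt.Println("umount", dir)
    return nil
}

// RemoveDir shows why the split matters: if the check and the unmount each
// took the lock separately, another goroutine could mount or remove the LV in
// between (a TOCTOU race). Holding the lock across both closes that window.
func RemoveDir(dir string) error {
    LockLV()
    defer UnlockLV()

    mounted, err := IsMountPointInternal(dir)
    if err != nil {
        return err
    }
    if mounted {
        return UnmountVolumeInternal(dir)
    }
    return nil
}

func main() {
    _ = RemoveDir("/tmp/example-snapshot")
}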
defer func() { + if mountPath != "" { + if err = o.unmountLvm(ctx, mountPath); err != nil { + log.G(ctx).WithError(err).WithField("path", mountPath).Warn("failed to unmount directory") + } + } if err == nil { for _, dir := range removals { o.RemoveDir(ctx, dir) @@ -419,17 +429,12 @@ func (o *Snapshotter) Remove(ctx context.Context, key string) (err error) { return o.ms.WithTransaction(ctx, true, func(ctx context.Context) error { // modified by sealos - var mountPath string mountPath, err = storage.RemoveDevbox(ctx, key) log.G(ctx).Infof("Removed devbox content for key: %s, mount path: %s", key, mountPath) if err != nil && err != errdefs.ErrNotFound { return fmt.Errorf("failed to remove devbox content for snapshot %s: %w", key, err) } - if mountPath != "" { - if err = o.unmountLvm(ctx, mountPath); err != nil { - log.G(ctx).WithError(err).WithField("path", mountPath).Warn("failed to unmount directory") - } - } + _, _, err = storage.Remove(ctx, key) if err != nil { return fmt.Errorf("failed to remove snapshot %s: %w", key, err) @@ -640,6 +645,10 @@ func readProcMounts() ([][]string, error) { } func isMountPoint(dir string) (bool, error) { + // Acquire global LVM lock to protect reading /proc/mounts + lvm.RLockLV() + defer lvm.RUnlockLV() + mounts, err := readProcMounts() if err != nil { return false, err @@ -657,6 +666,10 @@ func isMountPoint(dir string) (bool, error) { } func (o *Snapshotter) mkfs(lvName string) error { + // Acquire global LVM lock to protect filesystem operations + lvm.LockLV() + defer lvm.UnlockLV() + devicePath := fmt.Sprintf("/dev/%s/%s", o.lvmVgName, lvName) // Check if the device exists if _, err := os.Stat(devicePath); os.IsNotExist(err) { diff --git a/snapshots/devbox/lvm/lvm.go b/snapshots/devbox/lvm/lvm.go index f86691fc3b94..4b02d402e0b4 100644 --- a/snapshots/devbox/lvm/lvm.go +++ b/snapshots/devbox/lvm/lvm.go @@ -36,8 +36,28 @@ import ( apis "github.com/openebs/lvm-localpv/pkg/apis/openebs.io/lvm/v1alpha1" ) -// lvmLock lvm global lock -var lvmLock sync.Mutex +// lvmLock lvm global read-write lock +var lvmLock sync.RWMutex + +// LockLV acquires the global LVM lock for write operations +func LockLV() { + lvmLock.Lock() +} + +// UnlockLV releases the global LVM lock for write operations +func UnlockLV() { + lvmLock.Unlock() +} + +// RLockLV acquires the global LVM lock for read operations +func RLockLV() { + lvmLock.RLock() +} + +// RUnlockLV releases the global LVM lock for read operations +func RUnlockLV() { + lvmLock.RUnlock() +} // lvm related constants const ( @@ -473,12 +493,11 @@ func MountVolume(devicePath, mountPath, fsType string, flags uintptr, options st // This function is protected by the global LVM lock to prevent concurrent operations // It uses the same lock as LVM operations because unmount often happens alongside // LVM operations (resize, remove) and they may conflict on the same device -func UnmountVolume(mountPath string) error { - lvmLock.Lock() - defer lvmLock.Unlock() - +// UnmountVolumeInternal performs the actual unmount operation without acquiring locks. +// The caller must hold the appropriate lock before calling this function. 
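Switching the global lock to sync.RWMutex lets read-only queries (mount-point checks, /proc/mounts scans, lvs-style listings) proceed concurrently, while create, resize, remove and mount operations keep exclusive access. A small, self-contained illustration of that reader/writer split, using placeholder work instead of real LVM commands:

package main

import (
    "fmt"
    "sync"
    "time"
)

var lvmLock sync.RWMutex

// listVolumes is a read-only query: many goroutines may hold the read lock at
// once, so listings do not serialize behind each other.
func listVolumes(id int) {
    lvmLock.RLock()
    defer lvmLock.RUnlock()
    fmt.Printf("reader %d: listing volumes\n", id)
    time.Sleep(10 * time.Millisecond) // stand-in for parsing lvs output
}

// createVolume mutates LVM state and therefore takes the write lock,
// excluding both other writers and all readers while it runs.
func createVolume(name string) {
    lvmLock.Lock()
    defer lvmLock.Unlock()
    fmt.Println("writer: creating", name)
    time.Sleep(10 * time.Millisecond) // stand-in for lvcreate + mkfs
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func(id int) { defer wg.Done(); listVolumes(id) }(i)
    }
    wg.Add(1)
    go func() { defer wg.Done(); createVolume("devbox-vg/demo") }()
    wg.Wait()
}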
+func UnmountVolumeInternal(mountPath string) error { // check if the path is a mount point - isMounted, err := IsMountPoint(mountPath) + isMounted, err := IsMountPointInternal(mountPath) if err != nil { return fmt.Errorf("failed to check if %s is a mount point: %w", mountPath, err) } @@ -498,11 +517,46 @@ func UnmountVolume(mountPath string) error { return nil } +// IsMountPointInternal checks if a directory is a mount point without acquiring locks. +// The caller must hold the appropriate lock before calling this function. +func IsMountPointInternal(dir string) (bool, error) { + // check if the directory exists + if _, err := os.Stat(dir); os.IsNotExist(err) { + return false, nil + } + + // get directory information + dirStat, err := os.Stat(dir) + if err != nil { + return false, fmt.Errorf("failed to stat %s: %w", dir, err) + } + + // get parent directory information + parentDir := filepath.Dir(dir) + parentStat, err := os.Stat(parentDir) + if err != nil { + return false, fmt.Errorf("failed to stat parent %s: %w", parentDir, err) + } + + // if the directory and parent directory have different device numbers, it is a mount point + dirDev := dirStat.Sys().(*syscall.Stat_t).Dev + parentDev := parentStat.Sys().(*syscall.Stat_t).Dev + + return dirDev != parentDev, nil +} + +func UnmountVolume(mountPath string) error { + lvmLock.Lock() + defer lvmLock.Unlock() + + return UnmountVolumeInternal(mountPath) +} + // FindMountPointByDevice finds all mount points for a given device path by reading /proc/mounts // Returns a slice of mount point paths if found, empty slice if not mounted, and error on failure func FindMountPointByDevice(devicePath string) ([]string, error) { - lvmLock.Lock() - defer lvmLock.Unlock() + lvmLock.RLock() + defer lvmLock.RUnlock() data, err := os.ReadFile("/proc/mounts") if err != nil { @@ -559,29 +613,10 @@ func FindMountPointByDevice(devicePath string) ([]string, error) { // isMountPoint checks if a directory is a mount point func IsMountPoint(dir string) (bool, error) { - // check if the directory exists - if _, err := os.Stat(dir); os.IsNotExist(err) { - return false, nil - } + lvmLock.RLock() + defer lvmLock.RUnlock() - // get directory information - dirStat, err := os.Stat(dir) - if err != nil { - return false, fmt.Errorf("failed to stat %s: %w", dir, err) - } - - // get parent directory information - parentDir := filepath.Dir(dir) - parentStat, err := os.Stat(parentDir) - if err != nil { - return false, fmt.Errorf("failed to stat parent %s: %w", parentDir, err) - } - - // if the directory and parent directory have different device numbers, it is a mount point - dirDev := dirStat.Sys().(*syscall.Stat_t).Dev - parentDev := parentStat.Sys().(*syscall.Stat_t).Dev - - return dirDev != parentDev, nil + return IsMountPointInternal(dir) } // CheckLVMMetadataExists checks if the lvm volume exists in metadata @@ -1170,8 +1205,8 @@ func ListLVMLogicalVolume(ctx context.Context) ([]LogicalVolume, error) { // modified by sealos func ListLVMLogicalVolumeByVG(ctx context.Context, vg string, pool string) ([]LogicalVolume, error) { - lvmLock.Lock() - defer lvmLock.Unlock() + lvmLock.RLock() + defer lvmLock.RUnlock() if err := ReloadLVMMetadataCache(ctx); err != nil { return nil, err