diff --git a/collection.go b/collection.go index a7b6cb31b..0ead675ec 100644 --- a/collection.go +++ b/collection.go @@ -33,6 +33,15 @@ type CollectionOptions struct { // The given Maps are Clone()d before being used in the Collection, so the // caller can Close() them freely when they are no longer needed. MapReplacements map[string]*Map + + // UnsafeVariableExperiment enables the unsafe variable experiment, allowing + // the use of [VariablePointer] for direct memory access and atomic operations + // on global bpf variables. + // + // Experimental: enabling this may cause segfaults or compromise the memory + // integrity of your Go application or the bpf maps representing Variables. + // Use at your own risk. + UnsafeVariableExperiment bool } // CollectionSpec describes a collection. @@ -618,7 +627,12 @@ func (cl *collectionLoader) loadVariable(varName string) (*Variable, error) { // emit a Variable with a nil Memory. This keeps Collection{Spec}.Variables // consistent across systems with different feature sets without breaking // LoadAndAssign. - mm, err := m.Memory() + var mm *Memory + if cl.opts.UnsafeVariableExperiment { + mm, err = m.unsafeMemory() + } else { + mm, err = m.Memory() + } if err != nil && !errors.Is(err, ErrNotSupported) { return nil, fmt.Errorf("variable %s: getting memory for map %s: %w", varName, mapName, err) } diff --git a/internal/unix/types_linux.go b/internal/unix/types_linux.go index 7225220d5..63703c527 100644 --- a/internal/unix/types_linux.go +++ b/internal/unix/types_linux.go @@ -4,6 +4,7 @@ package unix import ( "syscall" + "unsafe" linux "golang.org/x/sys/unix" ) @@ -38,6 +39,7 @@ const ( PROT_WRITE = linux.PROT_WRITE MAP_ANON = linux.MAP_ANON MAP_SHARED = linux.MAP_SHARED + MAP_FIXED = linux.MAP_FIXED MAP_PRIVATE = linux.MAP_PRIVATE PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE @@ -136,6 +138,10 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e return linux.Mmap(fd, offset, length, prot, flags) } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + return linux.MmapPtr(fd, offset, addr, length, prot, flags) +} + func Munmap(b []byte) (err error) { return linux.Munmap(b) } diff --git a/internal/unix/types_other.go b/internal/unix/types_other.go index 323f7ff34..f3f764ebe 100644 --- a/internal/unix/types_other.go +++ b/internal/unix/types_other.go @@ -4,6 +4,7 @@ package unix import ( "syscall" + "unsafe" ) // Constants are distinct to avoid breaking switch statements. @@ -37,6 +38,7 @@ const ( PROT_WRITE MAP_ANON MAP_SHARED + MAP_FIXED MAP_PRIVATE PERF_ATTR_SIZE_VER1 PERF_TYPE_SOFTWARE @@ -197,6 +199,10 @@ func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, e return []byte{}, errNonLinux() } +func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) { + return nil, errNonLinux() +} + func Munmap(b []byte) (err error) { return errNonLinux() } diff --git a/map.go b/map.go index e206b9eff..5443c1be7 100644 --- a/map.go +++ b/map.go @@ -432,6 +432,34 @@ func (m *Map) Memory() (*Memory, error) { return mm, nil } +// unsafeMemory returns a heap-mapped memory region for the Map. The Map must +// have been created with the BPF_F_MMAPABLE flag. Repeated calls to Memory +// return the same mapping. Callers are responsible for coordinating access to +// Memory. 
+func (m *Map) unsafeMemory() (*Memory, error) { + if m.memory != nil { + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newUnsafeMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + func (m *Map) memorySize() (int, error) { switch m.Type() { case Array: diff --git a/memory.go b/memory.go index a48fe68be..3475fb07b 100644 --- a/memory.go +++ b/memory.go @@ -35,8 +35,9 @@ var ErrReadOnly = errors.New("resource is read-only") // for individual values. For accesses beyond a single value, the usual // concurrent programming rules apply. type Memory struct { - b []byte - ro bool + b []byte + ro bool + heap bool } func newMemory(fd, size int) (*Memory, error) { @@ -64,6 +65,7 @@ func newMemory(fd, size int) (*Memory, error) { mm := &Memory{ b, ro, + false, } runtime.SetFinalizer(mm, (*Memory).close) diff --git a/memory_test.go b/memory_test.go index fdb992af7..a4380fb14 100644 --- a/memory_test.go +++ b/memory_test.go @@ -87,7 +87,7 @@ func TestMemoryReadOnly(t *testing.T) { qt.Assert(t, qt.IsTrue(fz.ReadOnly())) } -func TestMemoryUnmap(t *testing.T) { +func TestMemoryClose(t *testing.T) { mm, err := mustMmapableArray(t, 0).Memory() qt.Assert(t, qt.IsNil(err)) diff --git a/memory_unsafe.go b/memory_unsafe.go new file mode 100644 index 000000000..3a612019c --- /dev/null +++ b/memory_unsafe.go @@ -0,0 +1,256 @@ +package ebpf + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" ) + +// This file contains an experimental, unsafe implementation of Memory that +// allows taking a Go pointer to a memory-mapped region. This currently does not +// have first-class support from the Go runtime, so it may break in future Go +// versions. The Go proposal for the runtime to track off-heap pointers is here: +// https://github.com/golang/go/issues/70224. +// +// In Go, the programmer should not have to worry about freeing memory. Since +// this API synthesizes Go variables around global variables declared in a BPF +// C program, we want to lean on the runtime for making sure accessing them is +// safe at all times. Unfortunately, Go (as of 1.24) does not have the ability +// to automatically manage memory that was not allocated by the runtime. +// +// This led to a solution that requests regular Go heap memory by allocating a +// slice (making the runtime track pointers into the slice's backing array) and +// memory-mapping the bpf map's memory over it. Then, before returning the +// Memory to the caller, a finalizer is set on the backing array, making sure +// the bpf map's memory is unmapped from the heap before releasing the backing +// array to the runtime for reallocation. +// +// This obviates the need to maintain a reference to the *Memory at all times, +// which is difficult for the caller to achieve if the variable access is done +// through another object (like a sync.Atomic) that can potentially be passed +// around the Go application. Accidentally losing the reference to the *Memory +// would result in hard-to-debug segfaults, which are always unexpected in Go.
+ +//go:linkname heapObjectsCanMove runtime.heapObjectsCanMove +func heapObjectsCanMove() bool + +var ErrInvalidType = errors.New("invalid type") + +func newUnsafeMemory(fd, size int) (*Memory, error) { + // Some architectures need the size to be page-aligned to work with MAP_FIXED. + if internal.Align(size, os.Getpagesize()) != size { + return nil, fmt.Errorf("memory: must be a multiple of page size (requested %d bytes)", size) + } + + // Allocate a page-aligned span of memory on the Go heap. + alloc, err := allocate(size) + if err != nil { + return nil, fmt.Errorf("allocating memory: %w", err) + } + + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. + // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + // + // Map the bpf map memory over a page-aligned allocation on the Go heap. + err = mapmap(fd, alloc, size, unix.PROT_READ|unix.PROT_WRITE) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. + var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + err = mapmap(fd, alloc, size, unix.PROT_READ) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{ + unsafe.Slice((*byte)(alloc), size), + ro, + true, + } + + return mm, nil +} + +// allocate returns a pointer to a page-aligned section of memory on the Go +// heap, managed by the runtime. +func allocate(size int) (unsafe.Pointer, error) { + // Memory-mapping over a piece of the Go heap is unsafe when the GC can + // randomly decide to move objects around, in which case the mapped region + // will not move along with it. + if heapObjectsCanMove() { + return nil, errors.New("this Go runtime has a moving garbage collector") + } + + if size == 0 { + return nil, errors.New("size must be greater than 0") + } + + // Request at least two pages of memory from the runtime to ensure we can + // align the requested allocation to a page boundary. This is needed for + // MAP_FIXED and makes sure we don't mmap over some other allocation on the Go + // heap. + size = internal.Align(size+os.Getpagesize(), os.Getpagesize()) + + // Allocate a new slice and store a pointer to its backing array. + alloc := unsafe.Pointer(unsafe.SliceData(make([]byte, size))) + + // Align the pointer to a page boundary within the allocation. This may + // alias the initial pointer if it was already page-aligned. + aligned := unsafe.Pointer(internal.Align(uintptr(alloc), uintptr(os.Getpagesize()))) + runtime.KeepAlive(alloc) + + // Return an aligned pointer into the backing array, losing the original + // reference. The runtime.SetFinalizer docs specify that its argument 'must be + // a pointer to an object, complit or local var', but this is still somewhat + // vague and not enforced by the current implementation. + // + // Currently, finalizers can be set and triggered from any address within a + // heap allocation, even individual struct fields or arbitrary offsets within + // a slice. 
In this case, finalizers set on struct fields or slice offsets + // will only run when the whole struct or backing array is collected. The + // accepted runtime.AddCleanup proposal makes this behaviour more explicit and + // is set to deprecate runtime.SetFinalizer. + // + // Alternatively, we'd have to track the original allocation and the aligned + // pointer separately, which severely complicates finalizer setup and makes it + // prone to human error. For now, just bump the pointer and treat it as the + // new and only reference to the backing array. + return aligned, nil +} + +// mapmap memory-maps the given file descriptor at the given address and sets a +// finalizer on addr to unmap it when it's no longer reachable. +func mapmap(fd int, addr unsafe.Pointer, size, prot int) error { + // Map the bpf map memory over the Go heap. This will result in the following + // mmap layout in the process' address space (0xc000000000 is a span of Go + // heap), visualized using pmap: + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 1824 864 864 rw--- [ anon ] + // 000000c0001c8000 4 4 4 rw-s- [ anon ] + // 000000c0001c9000 2268 16 16 rw--- [ anon ] + // + // This will break up the Go heap, but as long as the runtime doesn't try to + // move our allocation around, this is safe for as long as we hold a reference + // to our allocated object. + // + // Use MAP_SHARED to make sure the kernel sees any writes we do, and MAP_FIXED + // to ensure the mapping starts exactly at the address we requested. If addr + // isn't page-aligned, the mapping operation will fail. + if _, err := unix.MmapPtr(fd, 0, addr, uintptr(size), + prot, unix.MAP_SHARED|unix.MAP_FIXED); err != nil { + return fmt.Errorf("setting up memory-mapped region: %w", err) + } + + // Set a finalizer on the heap allocation to undo the mapping before the span + // is collected and reused by the runtime. This has a few reasons: + // + // - Avoid leaking memory/mappings. + // - Future writes to this memory should never clobber a bpf map's contents. + // - Some bpf maps are mapped read-only, causing a segfault if the runtime + // reallocates and zeroes the span later. + runtime.SetFinalizer((*byte)(addr), unmap(size)) + + return nil +} + +// unmap returns a function that takes a pointer to a memory-mapped region on +// the Go heap. The function undoes any mappings and discards the span's +// contents. +// +// Used as a finalizer in [mapmap], split off into a separate function for +// testing and to avoid accidentally closing over the unsafe.Pointer to the +// memory region, which would cause a cyclical reference. +// +// The resulting function panics if the mmap operation returns an error, since +// it would mean the integrity of the Go heap is compromised. +func unmap(size int) func(*byte) { + return func(a *byte) { + // Create another mapping at the same address to undo the original mapping. + // This will cause the kernel to repair the slab since we're using the same + // protection mode and flags as the original mapping for the Go heap. + // + // Address Kbytes RSS Dirty Mode Mapping + // 000000c000000000 4096 884 884 rw--- [ anon ] + // + // Using munmap here would leave an unmapped hole in the heap, compromising + // its integrity. + // + // MmapPtr allocates another unsafe.Pointer at the same address. Even though + // we discard it here, it may temporarily resurrect the backing array and + // delay its collection to the next GC cycle.
+ _, err := unix.MmapPtr(-1, 0, unsafe.Pointer(a), uintptr(size), + unix.PROT_READ|unix.PROT_WRITE, + unix.MAP_PRIVATE|unix.MAP_FIXED|unix.MAP_ANON) + if err != nil { + panic(fmt.Errorf("undoing bpf map memory mapping: %w", err)) + } + } +} + +// checkUnsafeMemory ensures value T can be accessed in mm at offset off. +func checkUnsafeMemory[T any](mm *Memory, off uint64) error { + if mm.b == nil { + return fmt.Errorf("memory-mapped region is nil") + } + if mm.ro { + return ErrReadOnly + } + if !mm.heap { + return fmt.Errorf("memory region is not heap-mapped, see CollectionOptions.UnsafeVariableExperiment: %w", ErrNotSupported) + } + + var t T + size := binary.Size(t) + if size < 0 { + return fmt.Errorf("can't determine size of type %T: %w", t, ErrInvalidType) + } + + align := internal.Align(off, uint64(size)) + if off != align { + return fmt.Errorf("unaligned access of memory-mapped region: size %d at offset %d aligns to %d", size, off, align) + } + + vs, bs := uint64(size), uint64(len(mm.b)) + if off+vs > bs { + return fmt.Errorf("%d-byte value at offset %d exceeds mmap size of %d bytes", vs, off, bs) + } + + return nil +} + +// reinterp reinterprets a pointer of type In to a pointer of type Out. +func reinterp[Out any, In any](in *In) *Out { + return (*Out)(unsafe.Pointer(in)) +} + +// memoryPointer returns a pointer to a value of type T at offset off in mm. +// Taking a pointer to a read-only Memory or to a Memory that is not heap-mapped +// is not supported. +// +// T must be a fixed-size type according to [binary.Size]. Types containing Go +// pointers are not valid. Memory must be writable, off must be aligned to the +// size of T, and the value must be within bounds of the Memory. +// +// To access read-only memory, use [Memory.ReadAt]. +func memoryPointer[T any](mm *Memory, off uint64) (*T, error) { + if err := checkUnsafeMemory[T](mm, off); err != nil { + return nil, fmt.Errorf("memory pointer: %w", err) + } + return reinterp[T](&mm.b[off]), nil +} diff --git a/memory_unsafe_test.go b/memory_unsafe_test.go new file mode 100644 index 000000000..9e6aeeba9 --- /dev/null +++ b/memory_unsafe_test.go @@ -0,0 +1,61 @@ +package ebpf + +import ( + "runtime" + "testing" + "unsafe" + + "github.com/go-quicktest/qt" + + "github.com/cilium/ebpf/internal/sys" +) + +func TestUnsafeMemoryUnmap(t *testing.T) { + mm, err := mustMmapableArray(t, 0).unsafeMemory() + qt.Assert(t, qt.IsNil(err)) + + // Avoid unmap running twice. + runtime.SetFinalizer(unsafe.SliceData(mm.b), nil) + + // unmap panics if the operation fails. + unmap(mm.Size())(unsafe.SliceData(mm.b)) +} + +func TestUnsafeMemoryPointer(t *testing.T) { + mm, err := mustMmapableArray(t, 0).unsafeMemory() + qt.Assert(t, qt.IsNil(err)) + + // Requesting an unaligned value should fail. + _, err = memoryPointer[uint32](mm, 7) + qt.Assert(t, qt.IsNotNil(err)) + + u32, err := memoryPointer[uint32](mm, 12) + qt.Assert(t, qt.IsNil(err)) + + *u32 = 0xf00d + qt.Assert(t, qt.Equals(*u32, 0xf00d)) + + _, err = memoryPointer[*uint32](mm, 0) + qt.Assert(t, qt.ErrorIs(err, ErrInvalidType)) +} + +func TestUnsafeMemoryReadOnly(t *testing.T) { + rd, err := mustMmapableArray(t, sys.BPF_F_RDONLY_PROG).unsafeMemory() + qt.Assert(t, qt.IsNil(err)) + + // BPF_F_RDONLY_PROG flag, so the Memory should be read-only. + qt.Assert(t, qt.IsTrue(rd.ReadOnly())) + + // Frozen maps can't be mapped rw either. 
+ frozen := mustMmapableArray(t, 0) + qt.Assert(t, qt.IsNil(frozen.Freeze())) + fz, err := frozen.unsafeMemory() + qt.Assert(t, qt.IsNil(err)) + qt.Assert(t, qt.IsTrue(fz.ReadOnly())) + + _, err = fz.WriteAt([]byte{1}, 0) + qt.Assert(t, qt.ErrorIs(err, ErrReadOnly)) + + _, err = memoryPointer[uint32](fz, 0) + qt.Assert(t, qt.ErrorIs(err, ErrReadOnly)) +} diff --git a/testdata/variables-eb.elf b/testdata/variables-eb.elf index 90442a378..9f89f7873 100644 Binary files a/testdata/variables-eb.elf and b/testdata/variables-eb.elf differ diff --git a/testdata/variables-el.elf b/testdata/variables-el.elf index 3a217bb5f..2af1f2611 100644 Binary files a/testdata/variables-el.elf and b/testdata/variables-el.elf differ diff --git a/testdata/variables.c b/testdata/variables.c index 797622cb4..2c3682687 100644 --- a/testdata/variables.c +++ b/testdata/variables.c @@ -38,4 +38,13 @@ __section("socket") int check_struct() { // Variable aligned on page boundary to ensure all bytes in the mapping can be // accessed through the Variable API. -volatile char var_array[8192] __section(".data.array"); +volatile uint8_t var_array[8192] __section(".data.array"); +__section("socket") int check_array() { + return var_array[sizeof(var_array) - 1] == 0xff; +} + +volatile uint32_t var_atomic __section(".data.atomic"); +__section("socket") int add_atomic() { + __sync_fetch_and_add(&var_atomic, 1); + return 0; +} diff --git a/variable.go b/variable.go index f4f0dd76f..0618351ea 100644 --- a/variable.go +++ b/variable.go @@ -1,6 +1,7 @@ package ebpf import ( + "encoding/binary" "fmt" "io" @@ -232,3 +233,37 @@ func (v *Variable) Get(out any) error { return nil } + +func checkVariable[T any](v *Variable) error { + if v.ReadOnly() { + return ErrReadOnly + } + + var t T + size := binary.Size(t) + if size < 0 { + return fmt.Errorf("can't determine size of type %T: %w", t, ErrInvalidType) + } + + if v.size != uint64(size) { + return fmt.Errorf("can't create %d-byte accessor to %d-byte variable", size, v.size) + } + return nil +} + +// VariablePointer returns a pointer to a variable of type T backed by memory +// shared with the BPF program. Requires +// [CollectionOptions.UnsafeVariableExperiment] to be true. +// +// Taking a pointer to a read-only Variable is not supported. T must be a +// fixed-size type according to [binary.Size]. Types containing Go pointers are +// not valid. +// +// When accessing structs, embedding [structs.HostLayout] may help ensure the +// layout of the Go struct matches the one in the BPF C program.
+func VariablePointer[T any](v *Variable) (*T, error) { + if err := checkVariable[T](v); err != nil { + return nil, fmt.Errorf("variable pointer %s: %w", v.name, err) + } + return memoryPointer[T](v.mm, v.offset) +} diff --git a/variable_test.go b/variable_test.go index 76e854d99..deaf524d5 100644 --- a/variable_test.go +++ b/variable_test.go @@ -1,6 +1,9 @@ package ebpf import ( + "runtime" + "structs" + "sync/atomic" "testing" "github.com/go-quicktest/qt" @@ -182,3 +185,113 @@ func TestVariableFallback(t *testing.T) { qt.Assert(t, qt.ErrorIs(err, ErrNotSupported)) } } + +func TestVariablePointer(t *testing.T) { + testutils.SkipIfNotSupported(t, haveMmapableMaps()) + + file := testutils.NativeFile(t, "testdata/variables-%s.elf") + spec, err := LoadCollectionSpec(file) + qt.Assert(t, qt.IsNil(err)) + + obj := struct { + AddAtomic *Program `ebpf:"add_atomic"` + CheckStruct *Program `ebpf:"check_struct"` + CheckArray *Program `ebpf:"check_array"` + + Atomic *Variable `ebpf:"var_atomic"` + Struct *Variable `ebpf:"var_struct"` + Array *Variable `ebpf:"var_array"` + }{} + + qt.Assert(t, qt.IsNil(spec.LoadAndAssign(&obj, &CollectionOptions{UnsafeVariableExperiment: true}))) + t.Cleanup(func() { + obj.AddAtomic.Close() + obj.CheckStruct.Close() + obj.CheckArray.Close() + }) + + // Bump the value by 1 using a bpf program. + want := uint32(1338) + a32, err := VariablePointer[atomic.Uint32](obj.Atomic) + qt.Assert(t, qt.IsNil(err)) + a32.Store(want - 1) + + mustReturn(t, obj.AddAtomic, 0) + qt.Assert(t, qt.Equals(a32.Load(), want)) + + _, err = VariablePointer[*uint32](obj.Atomic) + qt.Assert(t, qt.ErrorIs(err, ErrInvalidType)) + + _, err = VariablePointer[struct{ A, B *uint64 }](obj.Struct) + qt.Assert(t, qt.ErrorIs(err, ErrInvalidType)) + + type S struct { + _ structs.HostLayout + A, B uint64 + } + + s, err := VariablePointer[S](obj.Struct) + qt.Assert(t, qt.IsNil(err)) + *s = S{A: 0xa, B: 0xb} + mustReturn(t, obj.CheckStruct, 1) + + a, err := VariablePointer[[8192]byte](obj.Array) + qt.Assert(t, qt.IsNil(err)) + a[len(a)-1] = 0xff + mustReturn(t, obj.CheckArray, 1) +} + +func TestVariablePointerError(t *testing.T) { + testutils.SkipIfNotSupported(t, haveMmapableMaps()) + + file := testutils.NativeFile(t, "testdata/variables-%s.elf") + spec, err := LoadCollectionSpec(file) + qt.Assert(t, qt.IsNil(err)) + + obj := struct { + Atomic *Variable `ebpf:"var_atomic"` + }{} + + qt.Assert(t, qt.IsNil(spec.LoadAndAssign(&obj, nil))) + + _, err = VariablePointer[atomic.Uint32](obj.Atomic) + qt.Assert(t, qt.ErrorIs(err, ErrNotSupported)) +} + +func TestVariablePointerGC(t *testing.T) { + testutils.SkipIfNotSupported(t, haveMmapableMaps()) + + file := testutils.NativeFile(t, "testdata/variables-%s.elf") + spec, err := LoadCollectionSpec(file) + qt.Assert(t, qt.IsNil(err)) + + obj := struct { + AddAtomic *Program `ebpf:"add_atomic"` + Atomic *Variable `ebpf:"var_atomic"` + }{} + qt.Assert(t, qt.IsNil(spec.LoadAndAssign(&obj, &CollectionOptions{UnsafeVariableExperiment: true}))) + + // Pull out Program handle and Variable pointer so obj reference is dropped. + prog := obj.AddAtomic + t.Cleanup(func() { + prog.Close() + }) + + a32, err := VariablePointer[atomic.Uint32](obj.Atomic) + qt.Assert(t, qt.IsNil(err)) + + // No obj references past this point. Trigger multiple GC cycles to ensure + // obj is collected. + runtime.GC() + runtime.GC() + runtime.GC() + + // Trigger prog and read memory multiple times to ensure reference is still + // valid. 
+ mustReturn(t, prog, 0) + qt.Assert(t, qt.Equals(a32.Load(), 1)) + mustReturn(t, prog, 0) + qt.Assert(t, qt.Equals(a32.Load(), 2)) + mustReturn(t, prog, 0) + qt.Assert(t, qt.Equals(a32.Load(), 3)) +}
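Usage sketch (not part of the diff): the snippet below shows how a consumer would opt into the experiment and take a typed pointer to a global. The `counter.o` object file and the `counter` variable are hypothetical and only illustrate the API; any writable global placed in `.data`/`.bss` of a real object works the same way.

```go
package main

import (
	"fmt"
	"log"
	"sync/atomic"

	"github.com/cilium/ebpf"
)

func main() {
	// Hypothetical object file declaring a writable global: volatile uint32_t counter;
	spec, err := ebpf.LoadCollectionSpec("counter.o")
	if err != nil {
		log.Fatal(err)
	}

	var obj struct {
		Counter *ebpf.Variable `ebpf:"counter"` // hypothetical variable name
	}

	// Opt in to the experiment. Without it, the Variable is backed by a regular
	// mapping and VariablePointer returns ErrNotSupported.
	opts := &ebpf.CollectionOptions{UnsafeVariableExperiment: true}
	if err := spec.LoadAndAssign(&obj, opts); err != nil {
		log.Fatal(err)
	}

	// Take a pointer into the bpf map's memory. atomic.Uint32 gives race-free
	// access to a value that may be modified concurrently by the bpf program.
	ctr, err := ebpf.VariablePointer[atomic.Uint32](obj.Counter)
	if err != nil {
		log.Fatal(err)
	}

	ctr.Add(1)
	fmt.Println("counter:", ctr.Load())
}
```

Because the mapping is undone by a finalizer on the underlying heap allocation, the returned pointer stays valid even after the Variable and Collection are no longer referenced, as exercised by TestVariablePointerGC above.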