diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ffe1ec..081dca1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,8 +30,8 @@ jobs: strategy: matrix: - os: [macos-latest, ubuntu-latest] - go-version: [1.19, '1.20'] + os: [ubuntu-latest] + go-version: ['1.20', 1.21] steps: - name: Install Go @@ -54,4 +54,4 @@ jobs: - name: Run tests run: | go version - go test -timeout 60m -race -v ./... + go test -timeout 180m -race -v ./... diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index f7a2286..37d77fa 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -31,7 +31,7 @@ jobs: run: go build ./... - name: Generate coverage report - run: go test -timeout 60m -race -coverprofile=coverage.txt -covermode=atomic + run: go test -timeout 180m -race -coverprofile=coverage.txt -covermode=atomic - name: Upload coverage report to Codecov uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0 diff --git a/array.go b/array.go index 9904942..f8a3ae3 100644 --- a/array.go +++ b/array.go @@ -19,14 +19,18 @@ package atree import ( + "bytes" "encoding/binary" "fmt" "math" "strings" + "sync" "github.com/fxamacker/cbor/v2" ) +// NOTE: we use encoding size (in bytes) instead of Go type size for slab operations, +// such as merge and split, so size constants here are related to encoding size. const ( slabAddressSize = 8 slabIndexSize = 8 @@ -57,6 +61,37 @@ const ( // 32 is faster than 24 and 40. 
linearScanThreshold = 32 + + // inlined tag number size: CBOR tag number CBORTagInlinedArray or CBORTagInlinedMap + inlinedTagNumSize = 2 + + // inlined CBOR array head size: CBOR array head of 3 elements (extra data index, value id, elements) + inlinedCBORArrayHeadSize = 1 + + // inlined extra data index size: CBOR positive number encoded in 2 bytes [0, 255] (fixed-size for easy computation) + inlinedExtraDataIndexSize = 2 + + // inlined CBOR byte string head size for value ID: CBOR byte string head for byte string of 8 bytes + inlinedCBORValueIDHeadSize = 1 + + // inlined value id size: encoded in 8 bytes + inlinedValueIDSize = 8 + + // inlined array data slab prefix size: + // tag number (2 bytes) + + // 3-element array head (1 byte) + + // extra data index (2 bytes) [0, 255] + + // value ID index head (1 byte) + + // value ID index (8 bytes) + + // element array head (3 bytes) + inlinedArrayDataSlabPrefixSize = inlinedTagNumSize + + inlinedCBORArrayHeadSize + + inlinedExtraDataIndexSize + + inlinedCBORValueIDHeadSize + + inlinedValueIDSize + + arrayDataSlabElementHeadSize + + maxInlinedExtraDataIndex = 255 ) type ArraySlabHeader struct { @@ -69,6 +104,8 @@ type ArrayExtraData struct { TypeInfo TypeInfo // array type } +var _ ExtraData = &ArrayExtraData{} + // ArrayDataSlab is leaf node, implementing ArraySlab. type ArrayDataSlab struct { next SlabID @@ -78,6 +115,10 @@ type ArrayDataSlab struct { // extraData is data that is prepended to encoded slab data. // It isn't included in slab size calculation for splitting and merging. extraData *ArrayExtraData + + // inlined indicates whether this slab is stored inlined in its parent slab. + // This flag affects Encode(), ByteSize(), etc. 
+ inlined bool } func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { @@ -91,6 +132,7 @@ func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { } var _ ArraySlab = &ArrayDataSlab{} +var _ ContainerStorable = &ArrayDataSlab{} // ArrayMetaDataSlab is internal node, implementing ArraySlab. type ArrayMetaDataSlab struct { @@ -142,24 +184,68 @@ type ArraySlab interface { SetExtraData(*ArrayExtraData) PopIterate(SlabStorage, ArrayPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool + Inline(SlabStorage) error + Uninline(SlabStorage) error } -// Array is tree +// Array is a heterogeneous variable-size array, storing any type of values +// into a smaller ordered list of values and provides efficient functionality +// to lookup, insert and remove elements anywhere in the array. +// +// Array elements can be stored in one or more relatively fixed-sized segments. +// +// Array can be inlined into its parent container when the entire content fits in +// parent container's element size limit. Specifically, array with one segment +// which fits in size limit can be inlined, while arrays with multiple segments +// can't be inlined. type Array struct { Storage SlabStorage root ArraySlab + + // parentUpdater is a callback that notifies parent container when this array is modified. + // If this callback is nil, this array has no parent. Otherwise, this array has parent + // and this callback must be used when this array is changed by Append, Insert, Set, Remove, etc. + // + // parentUpdater acts like "parent pointer". It is not stored physically and is only in memory. + // It is setup when child array is returned from parent's Get. It is also setup when + // new child is added to parent through Set or Insert. + parentUpdater parentUpdater + + // mutableElementIndex tracks index of mutable element, such as Array and OrderedMap. + // This is needed by mutable element to properly update itself through parentUpdater. 
+ // WARNING: since mutableElementIndex is created lazily, we need to create mutableElementIndex + // if it is nil before adding/updating elements. Range, delete, and read are no-ops on nil Go map. + // TODO: maybe optimize by replacing map to get faster updates. + mutableElementIndex map[ValueID]uint64 +} + +var bufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(int(maxThreshold)) + return e + }, +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get().(*bytes.Buffer) +} + +func putBuffer(e *bytes.Buffer) { + e.Reset() + bufferPool.Put(e) } var _ Value = &Array{} +var _ mutableValueNotifier = &Array{} func (a *Array) Address() Address { return a.root.SlabID().address } -func (a *Array) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) { - return SlabIDStorable(a.SlabID()), nil -} - const arrayExtraDataLength = 1 func newArrayExtraDataFromData( @@ -208,16 +294,24 @@ func newArrayExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) return &ArrayExtraData{TypeInfo: typeInfo}, nil } +func (a *ArrayExtraData) isExtraData() bool { + return true +} + +func (a *ArrayExtraData) Type() TypeInfo { + return a.TypeInfo +} + // Encode encodes extra data as CBOR array: // // [type info] -func (a *ArrayExtraData) Encode(enc *Encoder) error { +func (a *ArrayExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { err := enc.CBOR.EncodeArrayHead(arrayExtraDataLength) if err != nil { return NewEncodingError(err) } - err = a.TypeInfo.Encode(enc.CBOR) + err = encodeTypeInfo(enc, a.TypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by TypeInfo interface. return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info") @@ -353,25 +447,26 @@ func newArrayDataSlabFromDataV0( return nil, NewDecodingError(err) } + // Compute slab size for version 1. 
+ slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable - } - - // Compute slab size for version 1. - slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize + slabSize += storable.ByteSize() } header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -385,23 +480,18 @@ func newArrayDataSlabFromDataV0( // newArrayDataSlabFromDataV1 decodes data in version 1: // -// Root DataSlab Header: -// -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ -// -// Non-root DataSlab Header (18 bytes): +// DataSlab Header: // -// +-------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) | -// +-------------------------------+-----------------------------+ +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded array of elements // // See ArrayExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. 
func newArrayDataSlabFromDataV1( id SlabID, h head, @@ -415,6 +505,7 @@ func newArrayDataSlabFromDataV1( ) { var err error var extraData *ArrayExtraData + var inlinedExtraData []ExtraData var next SlabID // Decode extra data @@ -426,6 +517,20 @@ func newArrayDataSlabFromDataV1( } } + // Decode inlined slab extra data + if h.hasInlinedSlabs() { + inlinedExtraData, data, err = newInlinedExtraDataFromData( + data, + decMode, + decodeStorable, + decodeTypeInfo, + ) + if err != nil { + // err is categorized already by newInlinedExtraDataFromData. + return nil, err + } + } + // Decode next slab ID if h.hasNextSlabID() { next, err = NewSlabIDFromRawBytes(data) @@ -450,14 +555,20 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingError(err) } + slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable + slabSize += storable.ByteSize() } // Check if data reached EOF @@ -465,15 +576,9 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingErrorf("data has %d bytes of extraneous data for array data slab", len(data)-cborDec.NumBytesDecoded()) } - // Compute slab size for version 1. - slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize - } - header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -482,30 +587,234 @@ func newArrayDataSlabFromDataV1( header: header, elements: elements, extraData: extraData, + inlined: false, // this function is only called when slab is not inlined. 
}, nil } -// Encode encodes this array data slab to the given encoder. +// DecodeInlinedArrayStorable decodes inlined array data slab. Encoding is +// version 1 with CBOR tag having tag number CBORTagInlinedArray, and tag contant +// as 3-element array: // -// Root DataSlab Header: +// +------------------+----------------+----------+ +// | extra data index | value ID index | elements | +// +------------------+----------------+----------+ // -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ +// NOTE: This function doesn't decode tag number because tag number is decoded +// in the caller and decoder only contains tag content. +func DecodeInlinedArrayStorable( + dec *cbor.StreamDecoder, + decodeStorable StorableDecoder, + parentSlabID SlabID, + inlinedExtraData []ExtraData, +) ( + Storable, + error, +) { + const inlinedArrayDataSlabArrayCount = 3 + + arrayCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, NewDecodingError(err) + } + + if arrayCount != inlinedArrayDataSlabArrayCount { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined array data slab: expect %d elements, got %d elements", + inlinedArrayDataSlabArrayCount, + arrayCount)) + } + + // element 0: extra data index + extraDataIndex, err := dec.DecodeUint64() + if err != nil { + return nil, NewDecodingError(err) + } + if extraDataIndex >= uint64(len(inlinedExtraData)) { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined array data slab: inlined extra data index %d exceeds number of inlined extra data %d", + extraDataIndex, + len(inlinedExtraData))) + } + + extraData, ok := inlinedExtraData[extraDataIndex].(*ArrayExtraData) + if !ok { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined array data slab: expect *ArrayExtraData, got %T", + inlinedExtraData[extraDataIndex])) + } + + // element 1: slab index + b, err := 
dec.DecodeBytes() + if err != nil { + return nil, NewDecodingError(err) + } + if len(b) != slabIndexSize { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined array data slab: expect %d bytes for slab index, got %d bytes", + slabIndexSize, + len(b))) + } + + var index SlabIndex + copy(index[:], b) + + slabID := NewSlabID(parentSlabID.address, index) + + // Decode array elements (CBOR array) + elemCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, NewDecodingError(err) + } + + size := uint32(inlinedArrayDataSlabPrefixSize) + + elements := make([]Storable, elemCount) + for i := 0; i < int(elemCount); i++ { + storable, err := decodeStorable(dec, slabID, inlinedExtraData) + if err != nil { + // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") + } + elements[i] = storable + + size += storable.ByteSize() + } + + header := ArraySlabHeader{ + slabID: slabID, + size: size, + count: uint32(elemCount), + } + + return &ArrayDataSlab{ + header: header, + elements: elements, + extraData: &ArrayExtraData{ + // Make a copy of extraData.TypeInfo because + // inlined extra data are shared by all inlined slabs. + TypeInfo: extraData.TypeInfo.Copy(), + }, + inlined: true, + }, nil +} + +// encodeAsInlined encodes inlined array data slab. 
Encoding is +// version 1 with CBOR tag having tag number CBORTagInlinedArray, +// and tag contant as 3-element array: // -// Non-root DataSlab Header (18 bytes): +// +------------------+----------------+----------+ +// | extra data index | value ID index | elements | +// +------------------+----------------+----------+ +func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder) error { + if a.extraData == nil { + return NewEncodingError( + fmt.Errorf("failed to encode non-root array data slab as inlined")) + } + + if !a.inlined { + return NewEncodingError( + fmt.Errorf("failed to encode standalone array data slab as inlined")) + } + + extraDataIndex, err := enc.inlinedExtraData().addArrayExtraData(a.extraData) + if err != nil { + // err is already categorized by InlinedExtraData.addArrayExtraData(). + return err + } + + if extraDataIndex > maxInlinedExtraDataIndex { + return NewEncodingError( + fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) + } + + // Encode tag number and array head of 3 elements + err = enc.CBOR.EncodeRawBytes([]byte{ + // tag number + 0xd8, CBORTagInlinedArray, + // array head of 3 elements + 0x83, + }) + if err != nil { + return NewEncodingError(err) + } + + // element 0: extra data index + // NOTE: encoded extra data index is fixed sized CBOR uint + err = enc.CBOR.EncodeRawBytes([]byte{ + 0x18, + byte(extraDataIndex), + }) + if err != nil { + return NewEncodingError(err) + } + + // element 1: slab index + err = enc.CBOR.EncodeBytes(a.header.slabID.index[:]) + if err != nil { + return NewEncodingError(err) + } + + // element 2: array elements + err = a.encodeElements(enc) + if err != nil { + // err is already categorized by ArrayDataSlab.encodeElements(). + return err + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +// Encode encodes this array data slab to the given encoder. 
// -// +-------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) | -// +-------------------------------+-----------------------------+ +// DataSlab Header: +// +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded array of elements // // See ArrayExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. func (a *ArrayDataSlab) Encode(enc *Encoder) error { + if a.inlined { + return a.encodeAsInlined(enc) + } + + // Encoding is done in two steps: + // + // 1. Encode array elements using a new buffer while collecting inlined extra data from inlined elements. + // 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer. + + // Get a buffer from a pool to encode elements. + elementBuf := getBuffer() + defer putBuffer(elementBuf) + + elementEnc := NewEncoder(elementBuf, enc.encMode) + + err := a.encodeElements(elementEnc) + if err != nil { + // err is already categorized by Array.encodeElements(). 
+ return err + } + + err = elementEnc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + const version = 1 h, err := newArraySlabHead(version, slabArrayData) @@ -513,7 +822,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { return NewEncodingError(err) } - if a.hasPointer() { + if a.HasPointer() { h.setHasPointers() } @@ -525,22 +834,35 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { h.setRoot() } + if elementEnc.hasInlinedExtraData() { + h.setHasInlinedSlabs() + } + // Encode head (version + flag) _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) } - // Encode header + // Encode extra data if a.extraData != nil { - // Encode extra data - err = a.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = a.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // err is already categorized by ArrayExtraData.Encode(). return err } } + // Encode inlined extra data + if elementEnc.hasInlinedExtraData() { + err = elementEnc.inlinedExtraData().Encode(enc) + if err != nil { + // err is already categorized by inlinedExtraData.Encode(). 
+ return err + } + } + // Encode next slab ID if a.next != SlabIDUndefined { n, err := a.next.ToRawBytes(enc.Scratch[:]) @@ -555,6 +877,21 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { } } + // Encode elements by copying raw bytes from previous buffer + err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes()) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func (a *ArrayDataSlab) encodeElements(enc *Encoder) error { // Encode CBOR array size manually for fix-sized encoding enc.Scratch[0] = 0x80 | 25 @@ -568,7 +905,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { // Write scratch content to encoder totalSize := countOffset + countSize - _, err = enc.Write(enc.Scratch[:totalSize]) + err := enc.CBOR.EncodeRawBytes(enc.Scratch[:totalSize]) if err != nil { return NewEncodingError(err) } @@ -590,7 +927,80 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error { return nil } -func (a *ArrayDataSlab) hasPointer() bool { +func (a *ArrayDataSlab) Inlined() bool { + return a.inlined +} + +// Inlinable returns true if +// - array data slab is root slab +// - size of inlined array data slab <= maxInlineSize +func (a *ArrayDataSlab) Inlinable(maxInlineSize uint64) bool { + if a.extraData == nil { + // Non-root data slab is not inlinable. + return false + } + + // At this point, this data slab is either + // - inlined data slab, or + // - not inlined root data slab + + // Compute inlined size from cached slab size + inlinedSize := a.header.size + if !a.inlined { + inlinedSize = inlinedSize - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + } + + // Inlined byte size must be less than max inline size. + return uint64(inlinedSize) <= maxInlineSize +} + +// Inline converts not-inlined ArrayDataSlab to inlined ArrayDataSlab and removes it from storage. 
+func (a *ArrayDataSlab) Inline(storage SlabStorage) error { + if a.inlined { + return NewFatalError(fmt.Errorf("failed to inline ArrayDataSlab %s: it is inlined already", a.header.slabID)) + } + + id := a.header.slabID + + // Remove slab from storage because it is going to be inlined. + err := storage.Remove(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id)) + } + + // Update data slab size as inlined slab. + a.header.size = a.header.size - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + + // Update data slab inlined status. + a.inlined = true + + return nil +} + +// Uninline converts an inlined ArrayDataSlab to uninlined ArrayDataSlab and stores it in storage. +func (a *ArrayDataSlab) Uninline(storage SlabStorage) error { + if !a.inlined { + return NewFatalError(fmt.Errorf("failed to un-inline ArrayDataSlab %s: it is not inlined", a.header.slabID)) + } + + // Update data slab size + a.header.size = a.header.size - + inlinedArrayDataSlabPrefixSize + + arrayRootDataSlabPrefixSize + + // Update data slab inlined status + a.inlined = false + + // Store slab in storage + return storeSlab(storage, a) +} + +func (a *ArrayDataSlab) HasPointer() bool { for _, e := range a.elements { if hasPointer(e) { return true @@ -606,6 +1016,9 @@ func (a *ArrayDataSlab) ChildStorables() []Storable { } func (a *ArrayDataSlab) getPrefixSize() uint32 { + if a.inlined { + return inlinedArrayDataSlabPrefixSize + } if a.extraData != nil { return arrayRootDataSlabPrefixSize } @@ -644,10 +1057,11 @@ func (a *ArrayDataSlab) Set(storage SlabStorage, address Address, index uint64, a.header.size = size - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storeSlab(storage, a) + if err != nil { + return nil, err + } } return oldElem, nil @@ -675,10 +1089,11 @@ func (a *ArrayDataSlab) Insert(storage SlabStorage, address Address, index uint6 a.header.count++ a.header.size += storable.ByteSize() - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err = storeSlab(storage, a) + if err != nil { + return err + } } return nil @@ -705,10 +1120,11 @@ func (a *ArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable, err a.header.count-- a.header.size -= v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storeSlab(storage, a) + if err != nil { + return nil, err + } } return v, nil @@ -1320,7 +1736,8 @@ func (a *ArrayMetaDataSlab) Encode(enc *Encoder) error { // Encode extra data if present if a.extraData != nil { - err = a.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = a.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // Don't need to wrap because err is already categorized by ArrayExtraData.Encode(). return err @@ -1491,11 +1908,11 @@ func (a *ArrayMetaDataSlab) Set(storage SlabStorage, address Address, index uint return existingElem, nil } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } + return existingElem, nil } @@ -1556,13 +1973,7 @@ func (a *ArrayMetaDataSlab) Insert(storage SlabStorage, address Address, index u // Insertion always increases the size, // so there is no need to check underflow - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) - } - - return nil + return storeSlab(storage, a) } func (a *ArrayMetaDataSlab) Remove(storage SlabStorage, index uint64) (Storable, error) { @@ -1612,10 +2023,9 @@ func (a *ArrayMetaDataSlab) Remove(storage SlabStorage, index uint64) (Storable, // Removal always decreases the size, // so there is no need to check isFull - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } return v, nil @@ -1648,25 +2058,17 @@ func (a *ArrayMetaDataSlab) SplitChildSlab(storage SlabStorage, child ArraySlab, a.header.size += arraySlabHeaderSize // Store modified slabs - err = storage.Store(left.SlabID(), left) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - - err = storage.Store(right.SlabID(), right) + err = storeSlab(storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + return storeSlab(storage, a) } // MergeOrRebalanceChildSlab merges or rebalances child slab. @@ -1734,22 +2136,15 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex] = baseCountSum + child.Header().count // Store modified slabs - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) - } - err = storage.Store(rightSib.SlabID(), rightSib) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + return storeSlab(storage, a) } // Rebalance with left sib @@ -1769,22 +2164,15 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex-1] = baseCountSum + leftSib.Header().count // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) - } - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + return storeSlab(storage, a) } // Rebalance with bigger sib @@ -1804,22 +2192,18 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex-1] = baseCountSum + leftSib.Header().count // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) - } - err = storage.Store(child.SlabID(), child) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + + return storeSlab(storage, a) + } else { // leftSib.ByteSize() <= rightSib.ByteSize @@ -1838,22 +2222,17 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.childrenCountSum[childHeaderIndex] = baseCountSum + child.Header().count // Store modified slabs - err = storage.Store(child.SlabID(), child) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) - } - err = storage.Store(rightSib.SlabID(), rightSib) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } - return nil + + return storeSlab(storage, a) } } @@ -1881,16 +2260,14 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove right sib from storage @@ -1925,16 +2302,14 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove child from storage @@ -1968,15 +2343,14 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove child from storage @@ -2009,15 +2383,13 @@ func (a *ArrayMetaDataSlab) MergeOrRebalanceChildSlab( a.header.size -= arraySlabHeaderSize // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(a.header.slabID, a) + err = storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return err } // Remove rightSib from storage @@ -2201,7 +2573,23 @@ func (a *ArrayMetaDataSlab) CanLendToRight(size uint32) bool { return a.header.size-arraySlabHeaderSize*n > uint32(minThreshold) } -func (a ArrayMetaDataSlab) IsData() bool { +func (a *ArrayMetaDataSlab) Inlined() bool { + return false +} + +func (a *ArrayMetaDataSlab) Inlinable(_ uint64) bool { + return false +} + +func (a *ArrayMetaDataSlab) Inline(_ SlabStorage) error { + return NewFatalError(fmt.Errorf("failed to inline ArrayMetaDataSlab %s: ArrayMetaDataSlab can't be inlined", a.header.slabID)) +} + +func (a *ArrayMetaDataSlab) Uninline(_ SlabStorage) error { + return NewFatalError(fmt.Errorf("failed to uninline ArrayMetaDataSlab %s: ArrayMetaDataSlab is already unlined", a.header.slabID)) +} + +func (a *ArrayMetaDataSlab) IsData() bool { return false } @@ -2307,10 +2695,9 @@ func NewArray(storage SlabStorage, address Address, typeInfo TypeInfo) (*Array, extraData: extraData, } - err = storage.Store(root.header.slabID, root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if 
needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.header.slabID)) + return nil, err } return &Array{ @@ -2341,21 +2728,236 @@ func NewArrayWithRootID(storage SlabStorage, rootID SlabID) (*Array, error) { }, nil } +// TODO: maybe optimize this +func (a *Array) incrementIndexFrom(index uint64) error { + // Although range loop over Go map is not deterministic, it is OK + // to use here because this operation is free of side-effect and + // leads to the same results independent of map order. + for id, i := range a.mutableElementIndex { + if i >= index { + if a.mutableElementIndex[id]+1 >= a.Count() { + return NewFatalError(fmt.Errorf("failed to increment index of ValueID %s in array %s: new index exceeds array count", id, a.ValueID())) + } + a.mutableElementIndex[id]++ + } + } + return nil +} + +// TODO: maybe optimize this +func (a *Array) decrementIndexFrom(index uint64) error { + // Although range loop over Go map is not deterministic, it is OK + // to use here because this operation is free of side-effect and + // leads to the same results independent of map order. + for id, i := range a.mutableElementIndex { + if i > index { + if a.mutableElementIndex[id] <= 0 { + return NewFatalError(fmt.Errorf("failed to decrement index of ValueID %s in array %s: new index < 0", id, a.ValueID())) + } + a.mutableElementIndex[id]-- + } + } + return nil +} + +func (a *Array) getIndexByValueID(id ValueID) (uint64, bool) { + index, exist := a.mutableElementIndex[id] + return index, exist +} + +func (a *Array) setParentUpdater(f parentUpdater) { + a.parentUpdater = f +} + +// setCallbackWithChild sets up callback function with child value (child) +// so parent array (a) can be notified when child value is modified. 
+func (a *Array) setCallbackWithChild(i uint64, child Value, maxInlineSize uint64) { + c, ok := child.(mutableValueNotifier) + if !ok { + return + } + + vid := c.ValueID() + + // mutableElementIndex is lazily initialized. + if a.mutableElementIndex == nil { + a.mutableElementIndex = make(map[ValueID]uint64) + } + + // Index i will be updated with array operations, which affects element index. + a.mutableElementIndex[vid] = i + + c.setParentUpdater(func() (found bool, err error) { + + // Avoid unnecessary write operation on parent container. + // Child value was stored as SlabIDStorable (not inlined) in parent container, + // and continues to be stored as SlabIDStorable (still not inlinable), + // so no update to parent container is needed. + if !c.Inlined() && !c.Inlinable(maxInlineSize) { + return true, nil + } + + // Get latest adjusted index by child value ID. + adjustedIndex, exist := a.getIndexByValueID(vid) + if !exist { + return false, nil + } + + storable, err := a.root.Get(a.Storage, adjustedIndex) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by ArraySlab.Get(). + return false, err + } + + // Verify retrieved element is either SlabIDStorable or Slab, with identical value ID. + switch storable := storable.(type) { + case SlabIDStorable: + sid := SlabID(storable) + if !vid.equal(sid) { + return false, nil + } + + case Slab: + sid := storable.SlabID() + if !vid.equal(sid) { + return false, nil + } + + default: + return false, nil + } + + // Set child value with parent array using updated index. + // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := a.set(adjustedIndex, c) + if err != nil { + return false, err + } + + // Verify overwritten storable has identical value ID. 
+ + switch existingValueStorable := existingValueStorable.(type) { + case SlabIDStorable: + sid := SlabID(existingValueStorable) + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", + sid, + vid)) + } + + case Slab: + sid := existingValueStorable.SlabID() + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", + sid, + vid)) + } + + case nil: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is nil")) + + default: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is wrong type %T", + existingValueStorable)) + } + + return true, nil + }) +} + +// notifyParentIfNeeded calls parent updater if this array (a) is a child element in another container. +func (a *Array) notifyParentIfNeeded() error { + if a.parentUpdater == nil { + return nil + } + + // If parentUpdater() doesn't find child array (a), then no-op on parent container + // and unset parentUpdater callback in child array. This can happen when child + // array is an outdated reference (removed or overwritten in parent container). + found, err := a.parentUpdater() + if err != nil { + return err + } + if !found { + a.parentUpdater = nil + } + return nil +} + func (a *Array) Get(i uint64) (Value, error) { storable, err := a.root.Get(a.Storage, i) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Get(). return nil, err } + v, err := storable.StoredValue(a.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + + // As a parent, this array (a) sets up notification callback with child + // value (v) so this array can be notified when child value is modified. + a.setCallbackWithChild(i, v, maxInlineArrayElementSize) + return v, nil } func (a *Array) Set(index uint64, value Value) (Storable, error) { + existingStorable, err := a.set(index, value) + if err != nil { + return nil, err + } + + var existingValueID ValueID + + // If overwritten storable is an inlined slab, uninline the slab and store it in storage. + // This is to prevent potential data loss because the overwritten inlined slab was not in + // storage and any future changes to it would have been lost. + switch s := existingStorable.(type) { + case ArraySlab: // inlined array slab + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + existingStorable = SlabIDStorable(s.SlabID()) + existingValueID = slabIDToValueID(s.SlabID()) + + case MapSlab: // inlined map slab + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + existingStorable = SlabIDStorable(s.SlabID()) + existingValueID = slabIDToValueID(s.SlabID()) + + case SlabIDStorable: // uninlined slab + existingValueID = slabIDToValueID(SlabID(s)) + } + + // Remove overwritten array/map's ValueID from mutableElementIndex if: + // - new value isn't array/map, or + // - new value is array/map with different value ID + if existingValueID != emptyValueID { + newValue, ok := value.(mutableValueNotifier) + if !ok || existingValueID != newValue.ValueID() { + delete(a.mutableElementIndex, existingValueID) + } + } + + return existingStorable, nil +} + +func (a *Array) set(index uint64, value Value) (Storable, error) { existingStorable, err := a.root.Set(a.Storage, a.Address(), index, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Set(). 
@@ -2368,7 +2970,6 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { // Don't need to wrap error as external error because err is already categorized by Array.splitRoot(). return nil, err } - return existingStorable, nil } if !a.root.IsData() { @@ -2382,6 +2983,30 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { } } + // This array (a) is a parent to the new child (value), and this array + // can also be a child in another container. + // + // As a parent, this array needs to setup notification callback with + // the new child value, so it can be notified when child is modified. + // + // If this array is a child, it needs to notify its parent because its + // content (maybe also its size) is changed by this "Set" operation. + + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by setting new child. + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + + // As a parent, this array sets up notification callback with child value + // so this array can be notified when child value is modified. + // + // Setting up notification with new child value can happen at any time + // (either before or after this array notifies its parent) because + // setting up notification doesn't trigger any read/write ops on parent or child. + a.setCallbackWithChild(index, value, maxInlineArrayElementSize) + return existingStorable, nil } @@ -2398,14 +3023,87 @@ func (a *Array) Insert(index uint64, value Value) error { } if a.root.IsFull() { - // Don't need to wrap error as external error because err is already categorized by Array.splitRoot(). - return a.splitRoot() + err = a.splitRoot() + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.splitRoot(). 
+ return err + } + } + + err = a.incrementIndexFrom(index) + if err != nil { + return err } + // This array (a) is a parent to the new child (value), and this array + // can also be a child in another container. + // + // As a parent, this array needs to setup notification callback with + // the new child value, so it can be notified when child is modified. + // + // If this array is a child, it needs to notify its parent because its + // content (also its size) is changed by this "Insert" operation. + + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by inserting new child. + err = a.notifyParentIfNeeded() + if err != nil { + return err + } + + // As a parent, this array sets up notification callback with child value + // so this array can be notified when child value is modified. + // + // Setting up notification with new child value can happen at any time + // (either before or after this array notifies its parent) because + // setting up notification doesn't trigger any read/write ops on parent or child. + a.setCallbackWithChild(index, value, maxInlineArrayElementSize) + return nil } func (a *Array) Remove(index uint64) (Storable, error) { + storable, err := a.remove(index) + if err != nil { + return nil, err + } + + // If overwritten storable is an inlined slab, uninline the slab and store it in storage. + // This is to prevent potential data loss because the overwritten inlined slab was not in + // storage and any future changes to it would have been lost. 
+ switch s := storable.(type) { + case ArraySlab: + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + storable = SlabIDStorable(s.SlabID()) + + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(s.SlabID()) + delete(a.mutableElementIndex, removedValueID) + + case MapSlab: + err = s.Uninline(a.Storage) + if err != nil { + return nil, err + } + storable = SlabIDStorable(s.SlabID()) + + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(s.SlabID()) + delete(a.mutableElementIndex, removedValueID) + + case SlabIDStorable: + // Delete removed element ValueID from mutableElementIndex + removedValueID := slabIDToValueID(SlabID(s)) + delete(a.mutableElementIndex, removedValueID) + } + + return storable, nil +} + +func (a *Array) remove(index uint64) (Storable, error) { storable, err := a.root.Remove(a.Storage, index) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Remove(). @@ -2424,6 +3122,18 @@ func (a *Array) Remove(index uint64) (Storable, error) { } } + err = a.decrementIndexFrom(index) + if err != nil { + return nil, err + } + + // If this array is a child, it notifies parent by invoking callback because + // this array is changed by removing element. + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + return storable, nil } @@ -2477,23 +3187,17 @@ func (a *Array) splitRoot() error { a.root = newRoot - err = a.Storage.Store(left.SlabID(), left) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - err = a.Storage.Store(right.SlabID(), right) + err = storeSlab(a.Storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = a.Storage.Store(a.root.SlabID(), a.root) + + err = storeSlab(a.Storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + return err } - return nil + return storeSlab(a.Storage, a.root) } func (a *Array) promoteChildAsNewRoot(childID SlabID) error { @@ -2520,11 +3224,11 @@ func (a *Array) promoteChildAsNewRoot(childID SlabID) error { a.root.SetExtraData(extraData) - err = a.Storage.Store(rootID, a.root) + err = storeSlab(a.Storage, a.root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rootID)) + return err } + err = a.Storage.Remove(childID) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. @@ -2534,77 +3238,311 @@ func (a *Array) promoteChildAsNewRoot(childID SlabID) error { return nil } -var emptyArrayIterator = &ArrayIterator{} +func (a *Array) Inlined() bool { + return a.root.Inlined() +} + +func (a *Array) Inlinable(maxInlineSize uint64) bool { + return a.root.Inlinable(maxInlineSize) +} + +// Storable returns array a as either: +// - SlabIDStorable, or +// - inlined data slab storable +func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) { + + inlined := a.root.Inlined() + inlinable := a.root.Inlinable(maxInlineSize) + + switch { + case inlinable && inlined: + // Root slab is inlinable and was inlined. + // Return root slab as storable, no size adjustment and change to storage. + return a.root, nil + + case !inlinable && !inlined: + // Root slab is not inlinable and was not inlined. 
+ // Return root slab ID as storable, no size adjustment and change to storage. + return SlabIDStorable(a.SlabID()), nil + + case inlinable && !inlined: + // Root slab is inlinable and was NOT inlined. + + // Inline root data slab. + err := a.root.Inline(a.Storage) + if err != nil { + return nil, err + } + + return a.root, nil + + case !inlinable && inlined: + + // Root slab is NOT inlinable and was previously inlined. + + // Uninline root slab. + err := a.root.Uninline(a.Storage) + if err != nil { + return nil, err + } + + return SlabIDStorable(a.SlabID()), nil + + default: + panic("not reachable") + } +} + +type ArrayIterator interface { + CanMutate() bool + Next() (Value, error) +} + +type emptyArrayIterator struct { + readOnly bool +} + +var _ ArrayIterator = &emptyArrayIterator{} + +var emptyMutableArrayIterator = &emptyArrayIterator{readOnly: false} +var emptyReadOnlyArrayIterator = &emptyArrayIterator{readOnly: true} + +func (i *emptyArrayIterator) CanMutate() bool { + return !i.readOnly +} + +func (*emptyArrayIterator) Next() (Value, error) { + return nil, nil +} + +type mutableArrayIterator struct { + array *Array + nextIndex uint64 + lastIndex uint64 // noninclusive index +} + +var _ ArrayIterator = &mutableArrayIterator{} + +func (i *mutableArrayIterator) CanMutate() bool { + return true +} + +func (i *mutableArrayIterator) Next() (Value, error) { + if i.nextIndex == i.lastIndex { + // No more elements. + return nil, nil + } + + // Don't need to set up notification callback for v because + // Get() returns value with notification already. 
+ v, err := i.array.Get(i.nextIndex) + if err != nil { + return nil, err + } + + i.nextIndex++ + + return v, nil +} + +type ReadOnlyArrayIteratorMutationCallback func(mutatedValue Value) + +type readOnlyArrayIterator struct { + array *Array + dataSlab *ArrayDataSlab + indexInDataSlab uint64 + remainingCount uint64 // needed for range iteration + valueMutationCallback ReadOnlyArrayIteratorMutationCallback +} + +// defaultReadOnlyArrayIteratorMutatinCallback is no-op. +var defaultReadOnlyArrayIteratorMutatinCallback ReadOnlyArrayIteratorMutationCallback = func(Value) {} -type ArrayIterator struct { - storage SlabStorage - id SlabID - dataSlab *ArrayDataSlab - index int - remainingCount int +var _ ArrayIterator = &readOnlyArrayIterator{} + +func (i *readOnlyArrayIterator) setMutationCallback(value Value) { + if v, ok := value.(mutableValueNotifier); ok { + v.setParentUpdater(func() (found bool, err error) { + i.valueMutationCallback(value) + return true, NewReadOnlyIteratorElementMutationError(i.array.ValueID(), v.ValueID()) + }) + } +} + +func (i *readOnlyArrayIterator) CanMutate() bool { + return false } -func (i *ArrayIterator) Next() (Value, error) { +func (i *readOnlyArrayIterator) Next() (Value, error) { if i.remainingCount == 0 { return nil, nil } - if i.dataSlab == nil { - if i.id == SlabIDUndefined { + if i.indexInDataSlab >= uint64(len(i.dataSlab.elements)) { + // No more elements in current data slab. + + nextDataSlabID := i.dataSlab.next + + if nextDataSlabID == SlabIDUndefined { + // No more elements in array. return nil, nil } - slab, found, err := i.storage.Retrieve(i.id) + // Load next data slab. + slab, found, err := i.array.Storage.Retrieve(nextDataSlabID) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id)) + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", nextDataSlabID)) } if !found { - return nil, NewSlabNotFoundErrorf(i.id, "slab not found during array iteration") + return nil, NewSlabNotFoundErrorf(nextDataSlabID, "slab not found during array iteration") } i.dataSlab = slab.(*ArrayDataSlab) - i.index = 0 - } + i.indexInDataSlab = 0 - var element Value - var err error - if i.index < len(i.dataSlab.elements) { - element, err = i.dataSlab.elements[i.index].StoredValue(i.storage) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + // Check current data slab isn't empty because i.remainingCount > 0. + if len(i.dataSlab.elements) == 0 { + return nil, NewSlabDataErrorf("data slab contains 0 elements, expect more") } - - i.index++ } - if i.index >= len(i.dataSlab.elements) { - i.id = i.dataSlab.next - i.dataSlab = nil + // At this point: + // - There are elements to iterate in array (i.remainingCount > 0), and + // - There are elements to iterate in i.dataSlab (i.indexInDataSlab < len(i.dataSlab.elements)) + + element, err := i.dataSlab.elements[i.indexInDataSlab].StoredValue(i.array.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + i.indexInDataSlab++ i.remainingCount-- + i.setMutationCallback(element) + return element, nil } -func (a *Array) Iterator() (*ArrayIterator, error) { +// Iterator returns mutable iterator for array elements. 
+// Mutable iterator handles: +// - indirect element mutation, such as modifying nested container +// - direct element mutation, such as overwriting existing element with new element +// Mutable iterator doesn't handle: +// - inserting new elements into the array +// - removing existing elements from the array +// NOTE: Use readonly iterator if mutation is not needed for better performance. +func (a *Array) Iterator() (ArrayIterator, error) { + if a.Count() == 0 { + return emptyMutableArrayIterator, nil + } + + return &mutableArrayIterator{ + array: a, + lastIndex: a.Count(), + }, nil +} + +// ReadOnlyIterator returns readonly iterator for array elements. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use ReadOnlyIteratorWithMutationCallback(). +func (a *Array) ReadOnlyIterator() (ArrayIterator, error) { + return a.ReadOnlyIteratorWithMutationCallback(nil) +} + +// ReadOnlyIteratorWithMutationCallback returns readonly iterator for array elements. +// valueMutationCallback is useful for logging, etc. with more context when mutation +// occurs. Mutation handling here is the same with or without callback. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - valueMutationCallback is called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use ReadOnlyIterator(). 
+func (a *Array) ReadOnlyIteratorWithMutationCallback( + valueMutationCallback ReadOnlyArrayIteratorMutationCallback, +) (ArrayIterator, error) { + if a.Count() == 0 { + return emptyReadOnlyArrayIterator, nil + } + slab, err := firstArrayDataSlab(a.Storage, a.root) if err != nil { // Don't need to wrap error as external error because err is already categorized by firstArrayDataSlab(). return nil, err } - return &ArrayIterator{ - storage: a.Storage, - id: slab.SlabID(), - dataSlab: slab, - remainingCount: int(a.Count()), + if valueMutationCallback == nil { + valueMutationCallback = defaultReadOnlyArrayIteratorMutatinCallback + } + + return &readOnlyArrayIterator{ + array: a, + dataSlab: slab, + remainingCount: a.Count(), + valueMutationCallback: valueMutationCallback, + }, nil +} + +func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (ArrayIterator, error) { + count := a.Count() + + if startIndex > count || endIndex > count { + return nil, NewSliceOutOfBoundsError(startIndex, endIndex, 0, count) + } + + if startIndex > endIndex { + return nil, NewInvalidSliceIndexError(startIndex, endIndex) + } + + if endIndex == startIndex { + return emptyMutableArrayIterator, nil + } + + return &mutableArrayIterator{ + array: a, + nextIndex: startIndex, + lastIndex: endIndex, }, nil } -func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterator, error) { +// ReadOnlyRangeIterator iterates readonly array elements from +// specified startIndex to endIndex. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use ReadOnlyRangeIteratorWithMutationCallback(). 
+func (a *Array) ReadOnlyRangeIterator( + startIndex uint64, + endIndex uint64, +) (ArrayIterator, error) { + return a.ReadOnlyRangeIteratorWithMutationCallback(startIndex, endIndex, nil) +} + +// ReadOnlyRangeIteratorWithMutationCallback iterates readonly array elements +// from specified startIndex to endIndex. +// valueMutationCallback is useful for logging, etc. with more context when +// mutation occurs. Mutation handling here is the same with or without callback. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - valueMutationCallback is called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use ReadOnlyRangeIterator(). +func (a *Array) ReadOnlyRangeIteratorWithMutationCallback( + startIndex uint64, + endIndex uint64, + valueMutationCallback ReadOnlyArrayIteratorMutationCallback, +) (ArrayIterator, error) { count := a.Count() if startIndex > count || endIndex > count { @@ -2618,7 +3556,7 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterato numberOfElements := endIndex - startIndex if numberOfElements == 0 { - return emptyArrayIterator, nil + return emptyReadOnlyArrayIterator, nil } var dataSlab *ArrayDataSlab @@ -2645,25 +3583,22 @@ func (a *Array) RangeIterator(startIndex uint64, endIndex uint64) (*ArrayIterato } } - return &ArrayIterator{ - storage: a.Storage, - id: dataSlab.SlabID(), - dataSlab: dataSlab, - index: int(index), - remainingCount: int(numberOfElements), + if valueMutationCallback == nil { + valueMutationCallback = defaultReadOnlyArrayIteratorMutatinCallback + } + + return &readOnlyArrayIterator{ + array: a, + dataSlab: dataSlab, + indexInDataSlab: index, + remainingCount: numberOfElements, + valueMutationCallback: valueMutationCallback, }, nil } type ArrayIterationFunc func(element Value) (resume bool, err 
error)
 
-func (a *Array) Iterate(fn ArrayIterationFunc) error {
-
-	iterator, err := a.Iterator()
-	if err != nil {
-		// Don't need to wrap error as external error because err is already categorized by Array.Iterator().
-		return err
-	}
-
+func iterateArray(iterator ArrayIterator, fn ArrayIterationFunc) error {
 	for {
 		value, err := iterator.Next()
 		if err != nil {
@@ -2684,49 +3619,112 @@ func (a *Array) Iterate(fn ArrayIterationFunc) error {
 	}
 }
 
-func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error {
+func (a *Array) Iterate(fn ArrayIterationFunc) error {
+	iterator, err := a.Iterator()
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by Array.Iterator().
+		return err
+	}
+	return iterateArray(iterator, fn)
+}
+
+// IterateReadOnly iterates readonly array elements.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyWithMutationCallback().
+func (a *Array) IterateReadOnly(fn ArrayIterationFunc) error {
+	return a.IterateReadOnlyWithMutationCallback(fn, nil)
+}
+
+// IterateReadOnlyWithMutationCallback iterates readonly array elements.
+// valueMutationCallback is useful for logging, etc. with more context
+// when mutation occurs. Mutation handling here is the same with or
+// without this callback.
+// If values are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// - valueMutationCallback is called if provided
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback isn't needed, use IterateReadOnly().
+func (a *Array) IterateReadOnlyWithMutationCallback(
+	fn ArrayIterationFunc,
+	valueMutationCallback ReadOnlyArrayIteratorMutationCallback,
+) error {
+	iterator, err := a.ReadOnlyIteratorWithMutationCallback(valueMutationCallback)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyIterator().
+		return err
+	}
+	return iterateArray(iterator, fn)
+}
+
+func (a *Array) IterateRange(startIndex uint64, endIndex uint64, fn ArrayIterationFunc) error {
 	iterator, err := a.RangeIterator(startIndex, endIndex)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by Array.RangeIterator().
 		return err
 	}
+	return iterateArray(iterator, fn)
+}
 
-	for {
-		value, err := iterator.Next()
-		if err != nil {
-			// Don't need to wrap error as external error because err is already categorized by ArrayIterator.Next().
-			return err
-		}
-		if value == nil {
-			return nil
-		}
-		resume, err := fn(value)
-		if err != nil {
-			// Wrap err as external error (if needed) because err is returned by ArrayIterationFunc callback.
-			return wrapErrorAsExternalErrorIfNeeded(err)
-		}
-		if !resume {
-			return nil
-		}
+// IterateReadOnlyRange iterates readonly array elements from specified startIndex to endIndex.
+// If elements are mutated:
+// - those changes are not guaranteed to persist.
+// - mutation functions of child containers return ReadOnlyIteratorElementMutationError.
+// NOTE:
+// Use readonly iterator if mutation is not needed for better performance.
+// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyRangeWithMutationCallback().
+func (a *Array) IterateReadOnlyRange(
+	startIndex uint64,
+	endIndex uint64,
+	fn ArrayIterationFunc,
+) error {
+	return a.IterateReadOnlyRangeWithMutationCallback(startIndex, endIndex, fn, nil)
+}
+
+// IterateReadOnlyRangeWithMutationCallback iterates readonly array elements
+// from specified startIndex to endIndex.
+// valueMutationCallback is useful for logging, etc. with more context +// when mutation occurs. Mutation handling here is the same with or +// without this callback. +// If values are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - valueMutationCallback is called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use IterateReadOnlyRange(). +func (a *Array) IterateReadOnlyRangeWithMutationCallback( + startIndex uint64, + endIndex uint64, + fn ArrayIterationFunc, + valueMutationCallback ReadOnlyArrayIteratorMutationCallback, +) error { + iterator, err := a.ReadOnlyRangeIteratorWithMutationCallback(startIndex, endIndex, valueMutationCallback) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by Array.ReadOnlyRangeIterator(). + return err } + return iterateArray(iterator, fn) } + func (a *Array) Count() uint64 { return uint64(a.root.Header().count) } func (a *Array) SlabID() SlabID { + if a.root.Inlined() { + return SlabIDUndefined + } return a.root.SlabID() } func (a *Array) ValueID() ValueID { - sid := a.SlabID() - - var id ValueID - copy(id[:], sid.address[:]) - copy(id[8:], sid.index[:]) - - return id + return slabIDToValueID(a.root.SlabID()) } func (a *Array) Type() TypeInfo { @@ -2736,8 +3734,27 @@ func (a *Array) Type() TypeInfo { return nil } +func (a *Array) SetType(typeInfo TypeInfo) error { + extraData := a.root.ExtraData() + extraData.TypeInfo = typeInfo + + a.root.SetExtraData(extraData) + + if a.Inlined() { + // Array is inlined. + + // Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined array. + return a.notifyParentIfNeeded() + } + + // Array is standalone. + + // Store modified root slab in storage since typeInfo is part of extraData stored in root slab. 
+ return storeSlab(a.Storage, a.root) +} + func (a *Array) String() string { - iterator, err := a.Iterator() + iterator, err := a.ReadOnlyIterator() if err != nil { return err.Error() } @@ -2774,18 +3791,23 @@ func getArraySlab(storage SlabStorage, id SlabID) (ArraySlab, error) { } func firstArrayDataSlab(storage SlabStorage, slab ArraySlab) (*ArrayDataSlab, error) { - if slab.IsData() { - return slab.(*ArrayDataSlab), nil - } - meta := slab.(*ArrayMetaDataSlab) - firstChildID := meta.childrenHeaders[0].slabID - firstChild, err := getArraySlab(storage, firstChildID) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getArraySlab(). - return nil, err + switch slab := slab.(type) { + case *ArrayDataSlab: + return slab, nil + + case *ArrayMetaDataSlab: + firstChildID := slab.childrenHeaders[0].slabID + firstChild, err := getArraySlab(storage, firstChildID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return nil, err + } + // Don't need to wrap error as external error because err is already categorized by firstArrayDataSlab(). + return firstArrayDataSlab(storage, firstChild) + + default: + return nil, NewUnreachableError() } - // Don't need to wrap error as external error because err is already categorized by firstArrayDataSlab(). 
- return firstArrayDataSlab(storage, firstChild) } // getArrayDataSlabWithIndex returns data slab containing element at specified index @@ -2831,20 +3853,29 @@ func (a *Array) PopIterate(fn ArrayPopIterationFunc) error { extraData := a.root.ExtraData() + inlined := a.root.Inlined() + + size := uint32(arrayRootDataSlabPrefixSize) + if inlined { + size = inlinedArrayDataSlabPrefixSize + } + // Set root to empty data slab a.root = &ArrayDataSlab{ header: ArraySlabHeader{ slabID: rootID, - size: arrayRootDataSlabPrefixSize, + size: size, }, extraData: extraData, + inlined: inlined, } // Save root slab - err = a.Storage.Store(a.root.SlabID(), a.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + if !a.Inlined() { + err = storeSlab(a.Storage, a.root) + if err != nil { + return err + } } return nil @@ -2968,12 +3999,9 @@ func NewArrayFromBatchData(storage SlabStorage, address Address, typeInfo TypeIn // Store all slabs for _, slab := range slabs { - err = storage.Store(slab.SlabID(), slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded( - err, - fmt.Sprintf("failed to store slab %s", slab.SlabID())) + return nil, err } } @@ -3000,10 +4028,9 @@ func NewArrayFromBatchData(storage SlabStorage, address Address, typeInfo TypeIn root.SetExtraData(extraData) // Store root - err = storage.Store(root.SlabID(), root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.SlabID())) + return nil, err } return &Array{ @@ -3210,8 +4237,8 @@ func (i *ArrayLoadedValueIterator) Next() (Value, error) { return nil, nil } -// LoadedValueIterator returns iterator to iterate loaded array elements. -func (a *Array) LoadedValueIterator() (*ArrayLoadedValueIterator, error) { +// ReadOnlyLoadedValueIterator returns iterator to iterate loaded array elements. +func (a *Array) ReadOnlyLoadedValueIterator() (*ArrayLoadedValueIterator, error) { switch slab := a.root.(type) { case *ArrayDataSlab: @@ -3249,9 +4276,9 @@ func (a *Array) LoadedValueIterator() (*ArrayLoadedValueIterator, error) { } } -// IterateLoadedValues iterates loaded array values. -func (a *Array) IterateLoadedValues(fn ArrayIterationFunc) error { - iterator, err := a.LoadedValueIterator() +// IterateReadOnlyLoadedValues iterates loaded array values. +func (a *Array) IterateReadOnlyLoadedValues(fn ArrayIterationFunc) error { + iterator, err := a.ReadOnlyLoadedValueIterator() if err != nil { // Don't need to wrap error as external error because err is already categorized by Array.LoadedValueIterator(). 
return err diff --git a/array_bench_test.go b/array_bench_test.go index 13c37c4..d3a3588 100644 --- a/array_bench_test.go +++ b/array_bench_test.go @@ -355,7 +355,7 @@ func benchmarkNewArrayFromAppend(b *testing.B, initialArraySize int) { for i := 0; i < b.N; i++ { copied, _ := NewArray(storage, array.Address(), array.Type()) - _ = array.Iterate(func(value Value) (bool, error) { + _ = array.IterateReadOnly(func(value Value) (bool, error) { _ = copied.Append(value) return true, nil }) @@ -379,7 +379,7 @@ func benchmarkNewArrayFromBatchData(b *testing.B, initialArraySize int) { b.StartTimer() for i := 0; i < b.N; i++ { - iter, err := array.Iterator() + iter, err := array.ReadOnlyIterator() require.NoError(b, err) copied, _ := NewArrayFromBatchData(storage, array.Address(), array.Type(), func() (Value, error) { diff --git a/array_debug.go b/array_debug.go index 3d28455..89a1702 100644 --- a/array_debug.go +++ b/array_debug.go @@ -63,16 +63,14 @@ func GetArrayStats(a *Array) (ArrayStats, error) { return ArrayStats{}, err } - if slab.IsData() { + switch slab.(type) { + case *ArrayDataSlab: dataSlabCount++ - childStorables := slab.ChildStorables() - for _, s := range childStorables { - if _, ok := s.(SlabIDStorable); ok { - storableSlabCount++ - } - } - } else { + ids := getSlabIDFromStorable(slab, nil) + storableSlabCount += uint64(len(ids)) + + case *ArrayMetaDataSlab: metaDataSlabCount++ for _, storable := range slab.ChildStorables() { @@ -130,20 +128,14 @@ func DumpArraySlabs(a *Array) ([]string, error) { return nil, err } - if slab.IsData() { - dataSlab := slab.(*ArrayDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab)) + switch slab := slab.(type) { + case *ArrayDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) - childStorables := dataSlab.ChildStorables() - for _, e := range childStorables { - if id, ok := e.(SlabIDStorable); ok { - overflowIDs = append(overflowIDs, SlabID(id)) - } - } + overflowIDs = 
getSlabIDFromStorable(slab, overflowIDs) - } else { - meta := slab.(*ArrayMetaDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, meta)) + case *ArrayMetaDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) for _, storable := range slab.ChildStorables() { id, ok := storable.(SlabIDStorable) @@ -175,8 +167,29 @@ func DumpArraySlabs(a *Array) ([]string, error) { type TypeInfoComparator func(TypeInfo, TypeInfo) bool -func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func VerifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { + return verifyArray(a, address, typeInfo, tic, hip, inlineEnabled, map[SlabID]struct{}{}) +} + +func verifyArray(a *Array, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error { + // Verify array address (independent of array inlined status) + if address != a.Address() { + return NewFatalError(fmt.Errorf("array address %v, got %v", address, a.Address())) + } + + // Verify array value ID (independent of array inlined status) + err := verifyArrayValueID(a) + if err != nil { + return err + } + // Verify array slab ID (dependent of array inlined status) + err = verifyArraySlabID(a) + if err != nil { + return err + } + + // Verify array extra data extraData := a.root.ExtraData() if extraData == nil { return NewFatalError(fmt.Errorf("root slab %d doesn't have extra data", a.root.SlabID())) @@ -192,10 +205,18 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp )) } - computedCount, dataSlabIDs, nextDataSlabIDs, err := - validArraySlab(tic, hip, a.Storage, a.root.Header().slabID, 0, nil, []SlabID{}, []SlabID{}) + v := &arrayVerifier{ + storage: a.Storage, + address: address, + tic: tic, + hip: hip, + inlineEnabled: inlineEnabled, + } + + // Verify array slabs + 
computedCount, dataSlabIDs, nextDataSlabIDs, err := v.verifySlab(a.root, 0, nil, []SlabID{}, []SlabID{}, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validArraySlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). return err } @@ -213,134 +234,242 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp return nil } -func validArraySlab( - tic TypeInfoComparator, - hip HashInputProvider, - storage SlabStorage, - id SlabID, +type arrayVerifier struct { + storage SlabStorage + address Address + tic TypeInfoComparator + hip HashInputProvider + inlineEnabled bool +} + +// verifySlab verifies ArraySlab in memory which can be inlined or not inlined. +func (v *arrayVerifier) verifySlab( + slab ArraySlab, level int, headerFromParentSlab *ArraySlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, ) ( elementCount uint32, _dataSlabIDs []SlabID, _nextDataSlabIDs []SlabID, err error, ) { + id := slab.Header().slabID - slab, err := getArraySlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getArraySlab(). - return 0, nil, nil, err + // Verify SlabID is unique + if _, exist := slabIDs[id]; exist { + return 0, nil, nil, NewFatalError(fmt.Errorf("found duplicate slab ID %s", id)) + } + + slabIDs[id] = struct{}{} + + // Verify slab address (independent of array inlined status) + if v.address != id.address { + return 0, nil, nil, NewFatalError(fmt.Errorf("array slab address %v, got %v", v.address, id.address)) + } + + // Verify that inlined slab is not in storage + if slab.Inlined() { + _, exist, err := v.storage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storage interface. 
+ return 0, nil, nil, wrapErrorAsExternalErrorIfNeeded(err) + } + if exist { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id)) + } } if level > 0 { // Verify that non-root slab doesn't have extra data if slab.ExtraData() != nil { - return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %d has extra data", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s has extra data", id)) } // Verify that non-root slab doesn't underflow if underflowSize, underflow := slab.IsUnderflow(); underflow { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d underflows by %d bytes", id, underflowSize)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s underflows by %d bytes", id, underflowSize)) } } // Verify that slab doesn't overflow if slab.IsFull() { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d overflows", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s overflows", id)) } // Verify that header is in sync with header from parent slab if headerFromParentSlab != nil { if !reflect.DeepEqual(*headerFromParentSlab, slab.Header()) { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d header %+v is different from header %+v from parent slab", + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s header %+v is different from header %+v from parent slab", id, slab.Header(), headerFromParentSlab)) } } - if slab.IsData() { - dataSlab, ok := slab.(*ArrayDataSlab) - if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d is not ArrayDataSlab", id)) - } + switch slab := slab.(type) { + case *ArrayDataSlab: + return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, slabIDs) + + case *ArrayMetaDataSlab: + return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, slabIDs) + + default: + return 0, nil, nil, NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) + } +} - // Verify that element count is the same as header.count - 
if uint32(len(dataSlab.elements)) != dataSlab.header.count { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header count %d is wrong, want %d", - id, dataSlab.header.count, len(dataSlab.elements))) +func (v *arrayVerifier) verifyDataSlab( + dataSlab *ArrayDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint32, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + err error, +) { + id := dataSlab.header.slabID + + if !dataSlab.IsData() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayDataSlab %s is not data", id)) + } + + // Verify that element count is the same as header.count + if uint32(len(dataSlab.elements)) != dataSlab.header.count { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header count %d is wrong, want %d", + id, dataSlab.header.count, len(dataSlab.elements))) + } + + // Verify that only root data slab can be inlined + if dataSlab.Inlined() { + if level > 0 { + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + if dataSlab.extraData == nil { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s doesn't have extra data", id)) + } + if dataSlab.next != SlabIDUndefined { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s has next slab ID", id)) } + } - // Verify that aggregated element size + slab prefix is the same as header.size - computedSize := uint32(arrayDataSlabPrefixSize) - if level == 0 { - computedSize = uint32(arrayRootDataSlabPrefixSize) + // Verify that aggregated element size + slab prefix is the same as header.size + computedSize := uint32(arrayDataSlabPrefixSize) + if level == 0 { + computedSize = uint32(arrayRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = uint32(inlinedArrayDataSlabPrefixSize) } - for _, e := range dataSlab.elements { + } - // Verify element size is <= inline size - if e.ByteSize() > uint32(maxInlineArrayElementSize) { - return 0, 
nil, nil, NewFatalError(fmt.Errorf("data slab %d element %s size %d is too large, want < %d", - id, e, e.ByteSize(), maxInlineArrayElementSize)) - } + for _, e := range dataSlab.elements { + computedSize += e.ByteSize() + } - computedSize += e.ByteSize() + if computedSize != dataSlab.header.size { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header size %d is wrong, want %d", + id, dataSlab.header.size, computedSize)) + } + + dataSlabIDs = append(dataSlabIDs, id) + + if dataSlab.next != SlabIDUndefined { + nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) + } + + for _, e := range dataSlab.elements { + + value, err := e.StoredValue(v.storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return 0, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, + fmt.Sprintf( + "data slab %s element %s can't be converted to value", + id, e, + )) } - if computedSize != dataSlab.header.size { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header size %d is wrong, want %d", - id, dataSlab.header.size, computedSize)) + // Verify element size <= inline size + if e.ByteSize() > uint32(maxInlineArrayElementSize) { + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", + id, e, e.ByteSize(), maxInlineArrayElementSize)) } - dataSlabIDs = append(dataSlabIDs, id) + switch e := e.(type) { + case SlabIDStorable: + // Verify not-inlined element > inline size, or can't be inlined + if v.inlineEnabled { + err = verifyNotInlinedValueStatusAndSize(value, uint32(maxInlineArrayElementSize)) + if err != nil { + return 0, nil, nil, err + } + } + + case *ArrayDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined array inlined status is false")) + } - if dataSlab.next != SlabIDUndefined { - nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) + case *MapDataSlab: + // Verify 
inlined element's inlined status + if !e.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("inlined map inlined status is false")) + } } // Verify element - for _, e := range dataSlab.elements { - v, err := e.StoredValue(storage) - if err != nil { - // Wrap err as external error (if needed) because err is returned by Storable interface. - return 0, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, - fmt.Sprintf( - "data slab %s element %s can't be converted to value", - id, e, - )) - } - err = ValidValue(v, nil, tic, hip) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValue(). - return 0, nil, nil, fmt.Errorf( - "data slab %d element %s isn't valid: %w", - id, e, err, - ) - } + err = verifyValue(value, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyValue(). + return 0, nil, nil, fmt.Errorf( + "data slab %s element %q isn't valid: %w", + id, e, err, + ) } + } + + return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil +} + +func (v *arrayVerifier) verifyMetaDataSlab( + metaSlab *ArrayMetaDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint32, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + err error, +) { + id := metaSlab.header.slabID - return dataSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil + if metaSlab.IsData() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayMetaDataSlab %s is data", id)) } - meta, ok := slab.(*ArrayMetaDataSlab) - if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d is not ArrayMetaDataSlab", id)) + if metaSlab.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("ArrayMetaDataSlab %s shouldn't be inlined", id)) } if level == 0 { // Verify that root slab has more than one child slabs - if len(meta.childrenHeaders) < 2 { + if 
len(metaSlab.childrenHeaders) < 2 { return 0, nil, nil, NewFatalError(fmt.Errorf("root metadata slab %d has %d children, want at least 2 children ", - id, len(meta.childrenHeaders))) + id, len(metaSlab.childrenHeaders))) } } // Verify childrenCountSum - if len(meta.childrenCountSum) != len(meta.childrenHeaders) { + if len(metaSlab.childrenCountSum) != len(metaSlab.childrenHeaders) { return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d has %d childrenCountSum, want %d", - id, len(meta.childrenCountSum), len(meta.childrenHeaders))) + id, len(metaSlab.childrenCountSum), len(metaSlab.childrenHeaders))) } computedCount := uint32(0) @@ -348,48 +477,54 @@ func validArraySlab( // If we use range, then h would be a temporary object and we'd be passing address of // temporary object to function, which can lead to bugs depending on usage. It's not a bug // with the current usage but it's less fragile to future changes by not using range here. - for i := 0; i < len(meta.childrenHeaders); i++ { - h := meta.childrenHeaders[i] + for i := 0; i < len(metaSlab.childrenHeaders); i++ { + h := metaSlab.childrenHeaders[i] + + childSlab, err := getArraySlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return 0, nil, nil, err + } // Verify child slabs var count uint32 count, dataSlabIDs, nextDataSlabIDs, err = - validArraySlab(tic, hip, storage, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs) + v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validArraySlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). 
return 0, nil, nil, err } computedCount += count // Verify childrenCountSum - if meta.childrenCountSum[i] != computedCount { + if metaSlab.childrenCountSum[i] != computedCount { return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d childrenCountSum[%d] is %d, want %d", - id, i, meta.childrenCountSum[i], computedCount)) + id, i, metaSlab.childrenCountSum[i], computedCount)) } } // Verify that aggregated element count is the same as header.count - if computedCount != meta.header.count { + if computedCount != metaSlab.header.count { return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d header count %d is wrong, want %d", - id, meta.header.count, computedCount)) + id, metaSlab.header.count, computedCount)) } // Verify that aggregated header size + slab prefix is the same as header.size - computedSize := uint32(len(meta.childrenHeaders)*arraySlabHeaderSize) + arrayMetaDataSlabPrefixSize - if computedSize != meta.header.size { + computedSize := uint32(len(metaSlab.childrenHeaders)*arraySlabHeaderSize) + arrayMetaDataSlabPrefixSize + if computedSize != metaSlab.header.size { return 0, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d header size %d is wrong, want %d", - id, meta.header.size, computedSize)) + id, metaSlab.header.size, computedSize)) } - return meta.header.count, dataSlabIDs, nextDataSlabIDs, nil + return metaSlab.header.count, dataSlabIDs, nextDataSlabIDs, nil } -// ValidArraySerialization traverses array tree and verifies serialization +// VerifyArraySerialization traverses array tree and verifies serialization // by encoding, decoding, and re-encoding slabs. // It compares in-memory objects of original slab with decoded slab. // It also compares encoded data of original slab with encoded data of decoded slab. 
-func ValidArraySerialization( +func VerifyArraySerialization( a *Array, cborDecMode cbor.DecMode, cborEncMode cbor.EncMode, @@ -397,149 +532,142 @@ func ValidArraySerialization( decodeTypeInfo TypeInfoDecoder, compare StorableComparator, ) error { - return validArraySlabSerialization( - a.Storage, - a.root.SlabID(), - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + // Skip verification of inlined array serialization. + if a.Inlined() { + return nil + } + + v := &serializationVerifier{ + storage: a.Storage, + cborDecMode: cborDecMode, + cborEncMode: cborEncMode, + decodeStorable: decodeStorable, + decodeTypeInfo: decodeTypeInfo, + compare: compare, + } + return v.verifyArraySlab(a.root) } -func validArraySlabSerialization( - storage SlabStorage, - id SlabID, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +type serializationVerifier struct { + storage SlabStorage + cborDecMode cbor.DecMode + cborEncMode cbor.EncMode + decodeStorable StorableDecoder + decodeTypeInfo TypeInfoDecoder + compare StorableComparator +} - slab, err := getArraySlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getArraySlab(). - return err - } +// verifySlab verifies serialization of not inlined ArraySlab. +func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error { + + id := slab.SlabID() // Encode slab - data, err := Encode(slab, cborEncMode) + data, err := EncodeSlab(slab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). 
return err } // Decode encoded slab - decodedSlab, err := DecodeSlab(id, data, cborDecMode, decodeStorable, decodeTypeInfo) + decodedSlab, err := DecodeSlab(id, data, v.cborDecMode, v.decodeStorable, v.decodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by DecodeSlab(). return err } // Re-encode decoded slab - dataFromDecodedSlab, err := Encode(decodedSlab, cborEncMode) + dataFromDecodedSlab, err := EncodeSlab(decodedSlab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). return err } + // Verify encoding is deterministic (encoded data of original slab is same as encoded data of decoded slab) + if !bytes.Equal(data, dataFromDecodedSlab) { + return NewFatalError(fmt.Errorf("encoded data of original slab %s is different from encoded data of decoded slab, got %v, want %v", + id, dataFromDecodedSlab, data)) + } + // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined compact map because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // compact map extra data section for reuse, and only compact map field + // values are encoded in non-extra data section. + // This reduces encoding size because compact map values of the same + // compact map type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined compact map by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). 
return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). + return err + } - if slab.Header().size != uint32(encodedSlabSize) { - return NewFatalError(fmt.Errorf("slab %d encoded size %d != header.size %d", - id, encodedSlabSize, slab.Header().size)) - } - - // Compare encoded data of original slab with encoded data of decoded slab - if !bytes.Equal(data, dataFromDecodedSlab) { - return NewFatalError(fmt.Errorf("slab %d encoded data is different from decoded slab's encoded data, got %v, want %v", - id, dataFromDecodedSlab, data)) - } - - if slab.IsData() { - dataSlab, ok := slab.(*ArrayDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not ArrayDataSlab", id)) + if slab.Header().size != uint32(encodedSlabSize) { + return NewFatalError(fmt.Errorf("slab %s encoded size %d != header.size %d", + id, encodedSlabSize, slab.Header().size)) } + } + switch slab := slab.(type) { + case *ArrayDataSlab: decodedDataSlab, ok := decodedSlab.(*ArrayDataSlab) if !ok { return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayDataSlab", id)) } // Compare slabs - err = arrayDataSlabEqual( - dataSlab, - decodedDataSlab, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.arrayDataSlabEqual(slab, decodedDataSlab) if err != nil { // Don't need to wrap error as external error because err is already categorized by arrayDataSlabEqual(). 
return fmt.Errorf("data slab %d round-trip serialization failed: %w", id, err) } return nil - } - metaSlab, ok := slab.(*ArrayMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not ArrayMetaDataSlab", id)) - } + case *ArrayMetaDataSlab: + decodedMetaSlab, ok := decodedSlab.(*ArrayMetaDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayMetaDataSlab", id)) + } - decodedMetaSlab, ok := decodedSlab.(*ArrayMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("decoded slab %d is not ArrayMetaDataSlab", id)) - } + // Compare slabs + err = v.arrayMetaDataSlabEqual(slab, decodedMetaSlab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by arrayMetaDataSlabEqual(). + return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) + } - // Compare slabs - err = arrayMetaDataSlabEqual(metaSlab, decodedMetaSlab) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by arrayMetaDataSlabEqual(). - return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) - } + for _, h := range slab.childrenHeaders { + childSlab, err := getArraySlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return err + } - for _, h := range metaSlab.childrenHeaders { - // Verify child slabs - err = validArraySlabSerialization( - storage, - h.slabID, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validArraySlabSerialization(). - return err + // Verify child slabs + err = v.verifyArraySlab(childSlab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyArraySlab(). 
+ return err + } } - } - return nil + return nil + + default: + return NewFatalError(fmt.Errorf("ArraySlab is either *ArrayDataSlab or *ArrayMetaDataSlab, got %T", slab)) + } } -func arrayDataSlabEqual( - expected *ArrayDataSlab, - actual *ArrayDataSlab, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) arrayDataSlabEqual(expected, actual *ArrayDataSlab) error { // Compare extra data err := arrayExtraDataEqual(expected.extraData, actual.extraData) @@ -548,6 +676,11 @@ func arrayDataSlabEqual( return err } + // Compare inlined status + if expected.inlined != actual.inlined { + return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) + } + // Compare next if expected.next != actual.next { return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next)) @@ -567,34 +700,49 @@ func arrayDataSlabEqual( for i := 0; i < len(expected.elements); i++ { ee := expected.elements[i] ae := actual.elements[i] - if !compare(ee, ae) { - return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) - } - // Compare nested element - if idStorable, ok := ee.(SlabIDStorable); ok { + switch ee := ee.(type) { + + case SlabIDStorable: // Compare not-inlined element + if !v.compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } - ev, err := idStorable.StoredValue(storage) + ev, err := ee.StoredValue(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). 
return err } - return ValidValueSerialization( - ev, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyValue(ev) + + case *ArrayDataSlab: // Compare inlined array + ae, ok := ae.(*ArrayDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as inlined *ArrayDataSlab, actual %T", ae)) + } + + return v.arrayDataSlabEqual(ee, ae) + + case *MapDataSlab: // Compare inlined map + ae, ok := ae.(*MapDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as inlined *MapDataSlab, actual %T", ae)) + } + + return v.mapDataSlabEqual(ee, ae) + + default: + if !v.compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } } } return nil } -func arrayMetaDataSlabEqual(expected, actual *ArrayMetaDataSlab) error { +func (v *serializationVerifier) arrayMetaDataSlabEqual(expected, actual *ArrayMetaDataSlab) error { // Compare extra data err := arrayExtraDataEqual(expected.extraData, actual.extraData) @@ -638,39 +786,19 @@ func arrayExtraDataEqual(expected, actual *ArrayExtraData) error { return nil } -func ValidValueSerialization( - value Value, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) verifyValue(value Value) error { - switch v := value.(type) { + switch value := value.(type) { case *Array: - return ValidArraySerialization( - v, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyArraySlab(value.root) + case *OrderedMap: - return ValidMapSerialization( - v, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.verifyMapSlab(value.root) } return nil } -func computeSlabSize(data []byte) (int, error) { +func computeSize(data []byte) (int, error) { if len(data) < versionAndFlagSize { return 0, NewDecodingError(fmt.Errorf("data is 
too short")) } @@ -680,20 +808,23 @@ func computeSlabSize(data []byte) (int, error) { return 0, NewDecodingError(err) } - slabExtraDataSize, err := getExtraDataSize(h, data[versionAndFlagSize:]) + slabExtraDataSize, inlinedSlabExtrDataSize, err := getExtraDataSizes(h, data[versionAndFlagSize:]) if err != nil { return 0, err } - // Computed slab size (slab header size): - // - excludes slab extra data size - // - adds next slab ID for non-root data slab if not encoded - size := len(data) - slabExtraDataSize - isDataSlab := h.getSlabArrayType() == slabArrayData || h.getSlabMapType() == slabMapData || h.getSlabMapType() == slabMapCollisionGroup + // computed size (slab header size): + // - excludes slab extra data size + // - excludes inlined slab extra data size + // - adds next slab ID for non-root data slab if not encoded + size := len(data) + size -= slabExtraDataSize + size -= inlinedSlabExtrDataSize + if !h.isRoot() && isDataSlab && !h.hasNextSlabID() { size += slabIDSize } @@ -701,15 +832,229 @@ func computeSlabSize(data []byte) (int, error) { return size, nil } -func getExtraDataSize(h head, data []byte) (int, error) { +func hasInlinedComposite(data []byte) (bool, error) { + if len(data) < versionAndFlagSize { + return false, NewDecodingError(fmt.Errorf("data is too short")) + } + + h, err := newHeadFromData(data[:versionAndFlagSize]) + if err != nil { + return false, NewDecodingError(err) + } + + if !h.hasInlinedSlabs() { + return false, nil + } + + data = data[versionAndFlagSize:] + + // Skip slab extra data if needed. + if h.isRoot() { + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + b, err := dec.DecodeRawBytes() + if err != nil { + return false, NewDecodingError(err) + } + + data = data[len(b):] + } + + // Parse inlined extra data to find compact map extra data. 
+ dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + + count, err := dec.DecodeArrayHead() + if err != nil { + return false, NewDecodingError(err) + } + if count != inlinedExtraDataArrayCount { + return false, NewDecodingError(fmt.Errorf("failed to decode inlined extra data, expect %d elements, got %d elements", inlinedExtraDataArrayCount, count)) + } + + // Skip element 0 (inlined type info) + err = dec.Skip() + if err != nil { + return false, NewDecodingError(err) + } + + // Decoding element 1 (inlined extra data) + extraDataCount, err := dec.DecodeArrayHead() + if err != nil { + return false, NewDecodingError(err) + } + for i := uint64(0); i < extraDataCount; i++ { + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return false, NewDecodingError(err) + } + if tagNum == CBORTagInlinedCompactMapExtraData { + return true, nil + } + err = dec.Skip() + if err != nil { + return false, NewDecodingError(err) + } + } + + return false, nil +} + +func getExtraDataSizes(h head, data []byte) (int, int, error) { + + var slabExtraDataSize, inlinedSlabExtraDataSize int + if h.isRoot() { dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) b, err := dec.DecodeRawBytes() if err != nil { - return 0, NewDecodingError(err) + return 0, 0, NewDecodingError(err) + } + slabExtraDataSize = len(b) + + data = data[slabExtraDataSize:] + } + + if h.hasInlinedSlabs() { + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + b, err := dec.DecodeRawBytes() + if err != nil { + return 0, 0, NewDecodingError(err) + } + inlinedSlabExtraDataSize = len(b) + } + + return slabExtraDataSize, inlinedSlabExtraDataSize, nil +} + +// getSlabIDFromStorable appends slab IDs from storable to ids. +// This function traverses child storables. If child storable +// is inlined map or array, inlined map or array is also traversed. 
+func getSlabIDFromStorable(storable Storable, ids []SlabID) []SlabID { + childStorables := storable.ChildStorables() + + for _, e := range childStorables { + switch e := e.(type) { + case SlabIDStorable: + ids = append(ids, SlabID(e)) + + case *ArrayDataSlab: + ids = getSlabIDFromStorable(e, ids) + + case *MapDataSlab: + ids = getSlabIDFromStorable(e, ids) + } + } + + return ids +} + +// verifyArrayValueID verifies array ValueID is always the same as +// root slab's SlabID indepedent of array's inlined status. +func verifyArrayValueID(a *Array) error { + rootSlabID := a.root.Header().slabID + + vid := a.ValueID() + + if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) { + return NewFatalError( + fmt.Errorf( + "expect first %d bytes of array value ID as %v, got %v", + slabAddressSize, + rootSlabID.address[:], + vid[:slabAddressSize])) + } + + if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) { + return NewFatalError( + fmt.Errorf( + "expect second %d bytes of array value ID as %v, got %v", + slabIndexSize, + rootSlabID.index[:], + vid[slabAddressSize:])) + } + + return nil +} + +// verifyArraySlabID verifies array SlabID is either empty for inlined array, or +// same as root slab's SlabID for not-inlined array. 
+func verifyArraySlabID(a *Array) error { + sid := a.SlabID() + + if a.Inlined() { + if sid != SlabIDUndefined { + return NewFatalError( + fmt.Errorf( + "expect empty slab ID for inlined array, got %v", + sid)) + } + return nil + } + + rootSlabID := a.root.Header().slabID + + if sid == SlabIDUndefined { + return NewFatalError( + fmt.Errorf( + "expect non-empty slab ID for not-inlined array, got %v", + sid)) + } + + if sid != rootSlabID { + return NewFatalError( + fmt.Errorf( + "expect array slab ID same as root slab's slab ID %s, got %s", + rootSlabID, + sid)) + } + + return nil +} + +func verifyNotInlinedValueStatusAndSize(v Value, maxInlineSize uint32) error { + + switch v := v.(type) { + case *Array: + // Verify not-inlined array's inlined status + if v.root.Inlined() { + return NewFatalError( + fmt.Errorf( + "not-inlined array %s has inlined status", + v.root.Header().slabID)) + } + + // Verify not-inlined array size. + if v.root.IsData() { + inlinableSize := v.root.ByteSize() - arrayRootDataSlabPrefixSize + inlinedArrayDataSlabPrefixSize + if inlinableSize <= maxInlineSize { + return NewFatalError( + fmt.Errorf("not-inlined array root slab %s can be inlined, inlinable size %d <= max inline size %d", + v.root.Header().slabID, + inlinableSize, + maxInlineSize)) + } + } + + case *OrderedMap: + // Verify not-inlined map's inlined status + if v.Inlined() { + return NewFatalError( + fmt.Errorf( + "not-inlined map %s has inlined status", + v.root.Header().slabID)) + } + + // Verify not-inlined map size. 
+ if v.root.IsData() { + inlinableSize := v.root.ByteSize() - mapRootDataSlabPrefixSize + inlinedMapDataSlabPrefixSize + if inlinableSize <= maxInlineSize { + return NewFatalError( + fmt.Errorf("not-inlined map root slab %s can be inlined, inlinable size %d <= max inline size %d", + v.root.Header().slabID, + inlinableSize, + maxInlineSize)) + } } - return len(b), nil } - return 0, nil + return nil } diff --git a/array_test.go b/array_test.go index 721b83f..172d02c 100644 --- a/array_test.go +++ b/array_test.go @@ -23,24 +23,46 @@ import ( "math" "math/rand" "reflect" + "runtime" "strings" "testing" "github.com/stretchr/testify/require" ) -func verifyEmptyArray( +func testEmptyArrayV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, address Address, array *Array, ) { - verifyArray(t, storage, typeInfo, address, array, nil, false) + testArrayV0(t, storage, typeInfo, address, array, nil, false) } -// verifyArray verifies array elements and validates serialization and in-memory slab tree. -func verifyArray( +func testEmptyArray( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, +) { + testArray(t, storage, typeInfo, address, array, nil, false) +} + +func testArrayV0( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, + values []Value, + hasNestedArrayMapElement bool, +) { + _testArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, false) +} + +func testArray( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -48,40 +70,54 @@ func verifyArray( array *Array, values []Value, hasNestedArrayMapElement bool, +) { + _testArray(t, storage, typeInfo, address, array, values, hasNestedArrayMapElement, true) +} + +// _testArray tests array elements, serialization, and in-memory slab tree. 
+func _testArray( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + array *Array, + expectedValues arrayValue, + hasNestedArrayMapElement bool, + inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, array.Type())) require.Equal(t, address, array.Address()) - require.Equal(t, uint64(len(values)), array.Count()) + require.Equal(t, uint64(len(expectedValues)), array.Count()) var err error // Verify array elements - for i, v := range values { - e, err := array.Get(uint64(i)) + for i, expected := range expectedValues { + actual, err := array.Get(uint64(i)) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, e) + valueEqual(t, expected, actual) } // Verify array elements by iterator i := 0 - err = array.Iterate(func(v Value) (bool, error) { - valueEqual(t, typeInfoComparator, values[i], v) + err = array.IterateReadOnly(func(v Value) (bool, error) { + valueEqual(t, expectedValues[i], v) i++ return true, nil }) require.NoError(t, err) - require.Equal(t, len(values), i) + require.Equal(t, len(expectedValues), i) // Verify in-memory slabs - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintArray(array) } require.NoError(t, err) // Verify slab serializations - err = ValidArraySerialization( + err = VerifyArraySerialization( array, storage.cborDecMode, storage.cborEncMode, @@ -107,6 +143,30 @@ func verifyArray( require.Equal(t, 1, len(rootIDs)) require.Equal(t, array.SlabID(), rootIDs[0]) + // Encode all non-nil slab + encodedSlabs := make(map[SlabID][]byte) + for id, slab := range storage.deltas { + if slab != nil { + b, err := EncodeSlab(slab, storage.cborEncMode) + require.NoError(t, err) + encodedSlabs[id] = b + } + } + + // Test decoded array from new storage to force slab decoding + decodedArray, err := NewArrayWithRootID( + 
newTestPersistentStorageWithBaseStorageAndDeltas(t, storage.baseStorage, encodedSlabs), + array.SlabID()) + require.NoError(t, err) + + // Verify decoded array elements + for i, expected := range expectedValues { + actual, err := decodedArray.Get(uint64(i)) + require.NoError(t, err) + + valueEqual(t, expected, actual) + } + if !hasNestedArrayMapElement { // Need to call Commit before calling storage.Count() for PersistentSlabStorage. err = storage.Commit() @@ -116,7 +176,7 @@ func verifyArray( require.NoError(t, err) require.Equal(t, stats.SlabCount(), uint64(storage.Count())) - if len(values) == 0 { + if len(expectedValues) == 0 { // Verify slab count for empty array require.Equal(t, uint64(1), stats.DataSlabCount) require.Equal(t, uint64(0), stats.MetaDataSlabCount) @@ -160,7 +220,7 @@ func TestArrayAppendAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } func TestArraySetAndGet(t *testing.T) { @@ -183,7 +243,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -195,10 +255,10 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) // This tests slabs splitting and root slab reassignment caused by Set operation. 
@@ -229,7 +289,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -241,10 +301,10 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) // This tests slabs merging and root slab reassignment caused by Set operation. @@ -276,7 +336,7 @@ func TestArraySetAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) for i := uint64(0); i < arraySize; i++ { oldValue := values[i] @@ -288,10 +348,10 @@ func TestArraySetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -326,7 +386,7 @@ func TestArraySetAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -354,7 +414,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) 
t.Run("insert-last", func(t *testing.T) { @@ -376,7 +436,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("insert", func(t *testing.T) { @@ -409,7 +469,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.NoError(t, err) } - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -443,7 +503,7 @@ func TestArrayInsertAndGet(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBoundsError) require.ErrorAs(t, userError, &indexOutOfBoundsError) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } @@ -481,7 +541,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[i], existingValue) + valueEqual(t, values[i], existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -491,11 +551,11 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, arraySize-i-1, array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values[i+1:], false) + testArray(t, storage, typeInfo, address, array, values[i+1:], false) } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) t.Run("remove-last", func(t *testing.T) { @@ -528,7 +588,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[i], existingValue) + valueEqual(t, values[i], existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ 
-538,11 +598,11 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, uint64(i), array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values[:i], false) + testArray(t, storage, typeInfo, address, array, values[:i], false) } } - verifyEmptyArray(t, storage, typeInfo, address, array) + testEmptyArray(t, storage, typeInfo, address, array) }) t.Run("remove", func(t *testing.T) { @@ -578,7 +638,7 @@ func TestArrayRemove(t *testing.T) { existingValue, err := existingStorable.StoredValue(array.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + valueEqual(t, v, existingValue) if id, ok := existingStorable.(SlabIDStorable); ok { err = array.Storage.Remove(SlabID(id)) @@ -591,13 +651,13 @@ func TestArrayRemove(t *testing.T) { require.Equal(t, uint64(len(values)), array.Count()) if i%256 == 0 { - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) } } require.Equal(t, arraySize/2, len(values)) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) t.Run("index out of bounds", func(t *testing.T) { @@ -629,11 +689,11 @@ func TestArrayRemove(t *testing.T) { require.ErrorAs(t, err, &indexOutOfBounds) require.ErrorAs(t, userError, &indexOutOfBounds) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, values, false) }) } -func TestArrayIterate(t *testing.T) { +func TestReadOnlyArrayIterate(t *testing.T) { t.Run("empty", func(t *testing.T) { typeInfo := testTypeInfo{42} @@ -644,7 +704,7 @@ func TestArrayIterate(t *testing.T) { require.NoError(t, err) i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { i++ return true, nil }) @@ -671,7 +731,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = 
array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -708,7 +768,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -741,7 +801,7 @@ func TestArrayIterate(t *testing.T) { } i := uint64(0) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(i), v) i++ return true, nil @@ -777,7 +837,7 @@ func TestArrayIterate(t *testing.T) { i := uint64(0) j := uint64(1) - err = array.Iterate(func(v Value) (bool, error) { + err = array.IterateReadOnly(func(v Value) (bool, error) { require.Equal(t, Uint64Value(j), v) i++ j += 2 @@ -803,7 +863,7 @@ func TestArrayIterate(t *testing.T) { } i := 0 - err = array.Iterate(func(_ Value) (bool, error) { + err = array.IterateReadOnly(func(_ Value) (bool, error) { if i == count/2 { return false, nil } @@ -832,7 +892,7 @@ func TestArrayIterate(t *testing.T) { testErr := errors.New("test") i := 0 - err = array.Iterate(func(_ Value) (bool, error) { + err = array.IterateReadOnly(func(_ Value) (bool, error) { if i == count/2 { return false, testErr } @@ -849,273 +909,462 @@ func TestArrayIterate(t *testing.T) { }) } -func testArrayIterateRange(t *testing.T, array *Array, values []Value) { - var i uint64 - var err error - var sliceOutOfBoundsError *SliceOutOfBoundsError - var invalidSliceIndexError *InvalidSliceIndexError +func TestMutateElementFromReadOnlyArrayIterator(t *testing.T) { - count := array.Count() + SetThreshold(256) + defer SetThreshold(1024) - // If startIndex > count, IterateRange returns SliceOutOfBoundsError - err = array.IterateRange(count+1, count+1, func(v Value) (bool, error) { - i++ - return true, nil + typeInfo := testTypeInfo{42} + address := 
Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + var mutationError *ReadOnlyIteratorElementMutationError + + t.Run("mutate inlined element from IterateReadOnly", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + // Iterate and modify element + var valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.True(t, c.Inlined()) + + err = c.Append(Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) }) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &sliceOutOfBoundsError) - require.ErrorAs(t, userError, &sliceOutOfBoundsError) - require.Equal(t, uint64(0), i) + t.Run("mutate inlined element from IterateReadOnlyRange", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // If endIndex > count, IterateRange returns SliceOutOfBoundsError - err = array.IterateRange(0, count+1, func(v Value) (bool, error) { - i++ - return true, nil + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + // Iterate and modify element + var 
valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyRangeWithMutationCallback( + 0, + parentArray.Count(), + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.True(t, c.Inlined()) + + err = c.Append(Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) }) - require.Equal(t, 1, errorCategorizationCount(err)) - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &sliceOutOfBoundsError) - require.ErrorAs(t, userError, &sliceOutOfBoundsError) - require.Equal(t, uint64(0), i) - // If startIndex > endIndex, IterateRange returns InvalidSliceIndexError - if count > 0 { - err = array.IterateRange(1, 0, func(v Value) (bool, error) { - i++ - return true, nil - }) - require.Equal(t, 1, errorCategorizationCount(err)) - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &invalidSliceIndexError) - require.ErrorAs(t, userError, &invalidSliceIndexError) - require.Equal(t, uint64(0), i) - } + t.Run("mutate not inlined array element from IterateReadOnly", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // IterateRange returns no error and iteration function is called on sliced array - for startIndex := uint64(0); startIndex <= count; startIndex++ { - for endIndex := startIndex; endIndex <= count; endIndex++ { - i = uint64(0) - err = array.IterateRange(startIndex, endIndex, func(v Value) (bool, error) { - valueEqual(t, typeInfoComparator, v, values[int(startIndex+i)]) - i++ - return true, nil + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + 
// Inserting elements into childArray so it can't be inlined + for i := 0; childArray.Inlined(); i++ { + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + } + + // Iterate and modify element + var valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingStorable, err := c.Remove(0) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined array element from IterateReadOnlyRange", func(t *testing.T) { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // child array [] + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + // parent array [[]] + err = parentArray.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + // Inserting elements into childArray so it can't be inlined + for i := 0; childArray.Inlined(); i++ { + v := Uint64Value(i) + err = childArray.Append(v) require.NoError(t, err) - require.Equal(t, endIndex-startIndex, i) } - } + + // Iterate and modify element + var valueMutationCallbackCalled bool + err = parentArray.IterateReadOnlyRangeWithMutationCallback( + 0, + parentArray.Count(), + func(v Value) (resume bool, err error) { + c, ok := v.(*Array) + require.True(t, ok) + require.False(t, c.Inlined()) + + existingStorable, err := c.Remove(0) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) + + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) 
} -func TestArrayIterateRange(t *testing.T) { - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} +func TestMutableArrayIterate(t *testing.T) { t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - testArrayIterateRange(t, array, []Value{}) + i := uint64(0) + err = array.Iterate(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) }) - t.Run("dataslab as root", func(t *testing.T) { - const arraySize = 10 + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + const arraySize = 15 + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - value := Uint64Value(i) - values[i] = value - err := array.Append(value) + v := Uint64Value(i) + err = array.Append(v) require.NoError(t, err) + + expectedValues[i] = v } + require.True(t, array.root.IsData()) - testArrayIterateRange(t, array, values) + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + require.Equal(t, Uint64Value(i), v) + + // Mutate primitive array elements by overwritting existing elements of similar byte size. 
+ newValue := Uint64Value(i * 2) + existingStorable, err := array.Set(uint64(i), newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, Uint64Value(i), existingValue) + + expectedValues[i] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) - t.Run("metadataslab as root", func(t *testing.T) { + t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) const arraySize = 1024 + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - value := Uint64Value(i) - values[i] = value - err := array.Append(value) + v := Uint64Value(i) + err = array.Append(v) require.NoError(t, err) - } - - testArrayIterateRange(t, array, values) - }) - t.Run("stop", func(t *testing.T) { - const arraySize = 10 + expectedValues[i] = v + } + require.False(t, array.root.IsData()) - storage := newTestPersistentStorage(t) + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + require.Equal(t, Uint64Value(i), v) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Mutate primitive array elements by overwritting existing elements with elements of similar size. 
+ newValue := Uint64Value(i * 2) + existingStorable, err := array.Set(uint64(i), newValue) + require.NoError(t, err) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) + existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - } + require.Equal(t, Uint64Value(i), existingValue) + + expectedValues[i] = newValue - i := uint64(0) - startIndex := uint64(1) - endIndex := uint64(5) - count := endIndex - startIndex - err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { - if i == count/2 { - return false, nil - } i++ + return true, nil }) require.NoError(t, err) - require.Equal(t, count/2, i) + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) - t.Run("error", func(t *testing.T) { + t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - const arraySize = 10 + expectedValues := make([]Value, arraySize) + r := rune('a') for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) + v := NewStringValue(string(r)) + err = array.Append(v) require.NoError(t, err) + + expectedValues[i] = v + r++ } + require.True(t, array.root.IsData()) - testErr := errors.New("test") + i := 0 + r = rune('a') + err = array.Iterate(func(v Value) (bool, error) { + require.Equal(t, NewStringValue(string(r)), v) + + // Mutate primitive array elements by overwritting existing elements with larger elements. + // Larger elements causes slabs to split. 
+ newValue := NewStringValue(strings.Repeat(string(r), 25)) + existingStorable, err := array.Set(uint64(i), newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, NewStringValue(string(r)), existingValue) + + expectedValues[i] = newValue + + i++ + r++ - i := uint64(0) - startIndex := uint64(1) - endIndex := uint64(5) - count := endIndex - startIndex - err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { - if i == count/2 { - return false, testErr - } - i++ return true, nil }) - // err is testErr wrapped in ExternalError. - require.Equal(t, 1, errorCategorizationCount(err)) - var externalError *ExternalError - require.ErrorAs(t, err, &externalError) - require.Equal(t, testErr, externalError.Unwrap()) - require.Equal(t, count/2, i) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) -} -func TestArrayRootSlabID(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - const arraySize = 4096 + const arraySize = 200 - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - savedRootID := array.SlabID() - require.NotEqual(t, SlabIDUndefined, savedRootID) + expectedValues := make([]Value, arraySize) + r := rune('a') + for i := uint64(0); i < arraySize; i++ { + v := NewStringValue(string(r)) + err = array.Append(v) + require.NoError(t, err) - // Append elements - 
for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) - require.Equal(t, savedRootID, array.SlabID()) - } + expectedValues[i] = v + r++ + } + require.False(t, array.root.IsData()) - require.True(t, typeInfoComparator(typeInfo, array.Type())) - require.Equal(t, address, array.Address()) - require.Equal(t, uint64(arraySize), array.Count()) + i := 0 + r = rune('a') + err = array.Iterate(func(v Value) (bool, error) { + require.Equal(t, NewStringValue(string(r)), v) - // Remove elements - for i := uint64(0); i < arraySize; i++ { - storable, err := array.Remove(0) - require.NoError(t, err) - require.Equal(t, Uint64Value(i), storable) - require.Equal(t, savedRootID, array.SlabID()) - } + // Mutate primitive array elements by overwriting existing elements with larger elements. + // Larger elements cause slabs to split. + newValue := NewStringValue(strings.Repeat(string(r), 25)) + existingStorable, err := array.Set(uint64(i), newValue) + require.NoError(t, err) - require.True(t, typeInfoComparator(typeInfo, array.Type())) - require.Equal(t, address, array.Address()) - require.Equal(t, uint64(0), array.Count()) - require.Equal(t, savedRootID, array.SlabID()) -} + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, NewStringValue(string(r)), existingValue) -func TestArraySetRandomValues(t *testing.T) { + expectedValues[i] = newValue - SetThreshold(256) - defer SetThreshold(1024) + i++ + r++ - const arraySize = 4096 + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) - r := newRand(t) + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + t.Run("mutate primitive values, root is metadata slab, merge slabs", func(t *testing.T) { + SetThreshold(256) + defer 
SetThreshold(1024) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + const arraySize = 80 - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - } - for i := uint64(0); i < arraySize; i++ { - oldValue := values[i] - newValue := randomValue(r, int(maxInlineArrayElementSize)) - values[i] = newValue + expectedValues := make([]Value, arraySize) + r := rune('a') + for i := uint64(0); i < arraySize; i++ { + v := NewStringValue(strings.Repeat(string(r), 25)) + err = array.Append(v) + require.NoError(t, err) - existingStorable, err := array.Set(i, newValue) - require.NoError(t, err) + expectedValues[i] = v + r++ + } + require.False(t, array.root.IsData()) - existingValue, err := existingStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) - } + i := 0 + r = rune('a') + err = array.Iterate(func(v Value) (bool, error) { + require.Equal(t, NewStringValue(strings.Repeat(string(r), 25)), v) - verifyArray(t, storage, typeInfo, address, array, values, false) -} + // Mutate primitive array elements by overwriting existing elements with smaller elements. + // Smaller elements cause slabs to merge.
+ newValue := NewStringValue(string(r)) + existingStorable, err := array.Set(uint64(i), newValue) + require.NoError(t, err) -func TestArrayInsertRandomValues(t *testing.T) { + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat(string(r), 25)), existingValue) - SetThreshold(256) - defer SetThreshold(1024) + expectedValues[i] = newValue - t.Run("insert-first", func(t *testing.T) { + i++ + r++ - const arraySize = 4096 + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) - r := newRand(t) + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) @@ -1124,23 +1373,58 @@ func TestArrayInsertRandomValues(t *testing.T) { array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - v := randomValue(r, int(maxInlineArrayElementSize)) - values[arraySize-i-1] = v + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - err := array.Insert(0, v) + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) require.NoError(t, err) + + expectedValues[i] = arrayValue{v} } + require.True(t, array.root.IsData()) - verifyArray(t, storage, typeInfo, address, array, values, false) - }) + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) - t.Run("insert-last", func(t *testing.T) { + // Mutate array elements by inserting 
more elements to child arrays. + newElement := Uint64Value(0) + err := childArray.Append(newElement) + require.NoError(t, err) + require.Equal(t, uint64(2), childArray.Count()) + require.True(t, childArray.Inlined()) - const arraySize = 4096 + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - r := newRand(t) + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 25 typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) @@ -1149,23 +1433,62 @@ func TestArrayInsertRandomValues(t *testing.T) { array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - v := randomValue(r, int(maxInlineArrayElementSize)) - values[i] = v + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - err := array.Insert(i, v) + v := Uint64Value(i) + err = childArray.Append(v) require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} } + require.False(t, array.root.IsData()) - verifyArray(t, storage, typeInfo, address, array, values, false) - }) + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) - t.Run("insert-random", func(t *testing.T) { + // Mutate array elements by inserting more elements to child arrays. 
+ newElement := Uint64Value(0) + err := childArray.Append(newElement) + require.NoError(t, err) + require.Equal(t, uint64(2), childArray.Count()) + require.True(t, childArray.Inlined()) - const arraySize = 4096 + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - r := newRand(t) + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const ( + arraySize = 15 + childArraySize = 1 + mutatedChildArraySize = 4 + ) typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) @@ -1174,2542 +1497,7436 @@ func TestArrayInsertRandomValues(t *testing.T) { array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - k := r.Intn(int(i) + 1) - v := randomValue(r, int(maxInlineArrayElementSize)) + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - copy(values[k+1:], values[k:]) - values[k] = v + var expectedValue arrayValue + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) - err := array.Insert(uint64(k), v) + expectedValue = append(expectedValue, v) + } + + err = array.Append(childArray) require.NoError(t, err) + + expectedValues[i] = expectedValue } + require.True(t, array.root.IsData()) - verifyArray(t, storage, typeInfo, address, array, values, false) - }) -} + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + 
require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) -func TestArrayRemoveRandomValues(t *testing.T) { + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - SetThreshold(256) - defer SetThreshold(1024) + // Mutate array elements by inserting more elements to child arrays. + for j := i; j < i+mutatedChildArraySize-childArraySize; j++ { + newElement := Uint64Value(j) - const arraySize = 4096 + err := childArray.Append(newElement) + require.NoError(t, err) - r := newRand(t) + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + } - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedValues[i] = expectedChildArrayValues - values := make([]Value, arraySize) - // Insert n random values into array - for i := uint64(0); i < arraySize; i++ { - v := randomValue(r, int(maxInlineArrayElementSize)) - values[i] = v + i++ - err := array.Insert(i, v) + return true, nil + }) require.NoError(t, err) - } + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) - verifyArray(t, storage, typeInfo, address, array, values, false) + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - // Remove n elements at random index - for i := uint64(0); i < arraySize; i++ { - k := r.Intn(int(array.Count())) + const ( + arraySize = 25 + childArraySize = 1 + mutatedChildArraySize = 4 + ) - existingStorable, err := array.Remove(uint64(k)) - require.NoError(t, err) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 
4, 5, 6, 7, 8} - existingValue, err := existingStorable.StoredValue(storage) + array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[k], existingValue) - copy(values[k:], values[k+1:]) - values = values[:len(values)-1] - - if id, ok := existingStorable.(SlabIDStorable); ok { - err = storage.Remove(SlabID(id)) + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - } - } - verifyEmptyArray(t, storage, typeInfo, address, array) -} + var expectedValue arrayValue + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) -func testArrayAppendSetInsertRemoveRandomValues( - t *testing.T, - r *rand.Rand, - storage *PersistentSlabStorage, - typeInfo TypeInfo, - address Address, - opCount int, -) (*Array, []Value) { - const ( - ArrayAppendOp = iota - ArrayInsertOp - ArraySetOp - ArrayRemoveOp - MaxArrayOp - ) + expectedValue = append(expectedValue, v) + } - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + err = array.Append(childArray) + require.NoError(t, err) - values := make([]Value, 0, opCount) - for i := 0; i < opCount; i++ { + expectedValues[i] = expectedValue + } + require.False(t, array.root.IsData()) - var nextOp int + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) - for { - nextOp = r.Intn(MaxArrayOp) + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - if array.Count() > 0 || (nextOp != ArrayRemoveOp && nextOp != ArraySetOp) { - break - } - } + // Mutate array elements by inserting more elements to child arrays. 
+ for j := i; j < i+mutatedChildArraySize-childArraySize; j++ { + newElement := Uint64Value(j) - switch nextOp { + err := childArray.Append(newElement) + require.NoError(t, err) - case ArrayAppendOp: - v := randomValue(r, int(maxInlineArrayElementSize)) - values = append(values, v) + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + } - err := array.Append(v) - require.NoError(t, err) + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) - case ArraySetOp: - k := r.Intn(int(array.Count())) - v := randomValue(r, int(maxInlineArrayElementSize)) + expectedValues[i] = expectedChildArrayValues - oldV := values[k] + i++ - values[k] = v + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.False(t, array.root.IsData()) - existingStorable, err := array.Set(uint64(k), v) - require.NoError(t, err) + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) - existingValue, err := existingStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldV, existingValue) + t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - if id, ok := existingStorable.(SlabIDStorable); ok { - err = storage.Remove(SlabID(id)) - require.NoError(t, err) - } + const ( + arraySize = 10 + childArraySize = 10 + mutatedChildArraySize = 1 + ) - case ArrayInsertOp: - k := r.Intn(int(array.Count() + 1)) - v := randomValue(r, int(maxInlineArrayElementSize)) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - if k == int(array.Count()) { - values = append(values, v) - } else { - values = append(values, nil) - copy(values[k+1:], values[k:]) - values[k] = v - } + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - err := array.Insert(uint64(k), v) + expectedValues 
:= make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - case ArrayRemoveOp: - k := r.Intn(int(array.Count())) + var expectedValue arrayValue + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) - existingStorable, err := array.Remove(uint64(k)) - require.NoError(t, err) + expectedValue = append(expectedValue, v) + } - existingValue, err := existingStorable.StoredValue(storage) + err = array.Append(childArray) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[k], existingValue) - - copy(values[k:], values[k+1:]) - values = values[:len(values)-1] - if id, ok := existingStorable.(SlabIDStorable); ok { - err = storage.Remove(SlabID(id)) - require.NoError(t, err) - } + expectedValues[i] = expectedValue } - require.Equal(t, uint64(len(values)), array.Count()) - require.True(t, typeInfoComparator(typeInfo, array.Type())) - require.Equal(t, address, array.Address()) - } + require.False(t, array.root.IsData()) - return array, values -} + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) -func TestArrayAppendSetInsertRemoveRandomValues(t *testing.T) { + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - SetThreshold(256) - defer SetThreshold(1024) + for j := childArraySize - 1; j > mutatedChildArraySize-1; j-- { + existingStorble, err := childArray.Remove(uint64(j)) + require.NoError(t, err) - const opCount = 4096 + existingValue, err := existingStorble.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, Uint64Value(i+j), existingValue) + } - r := newRand(t) + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) - typeInfo := 
testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + expectedValues[i] = expectedChildArrayValues[:mutatedChildArraySize] - array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) - verifyArray(t, storage, typeInfo, address, array, values, false) -} + i++ -func TestArrayNestedArrayMap(t *testing.T) { + return true, nil + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) - SetThreshold(256) - defer SetThreshold(1024) + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) - t.Run("small array", func(t *testing.T) { + t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - const arraySize = 4096 + const ( + arraySize = 2 + childArraySize = 1 + mutatedChildArraySize = 50 + ) - nestedTypeInfo := testTypeInfo{43} + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Create a list of arrays with 2 elements. 
- nestedArrays := make([]Value, arraySize) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - nested, err := NewArray(storage, address, nestedTypeInfo) + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - err = nested.Append(Uint64Value(i)) - require.NoError(t, err) + var expectedValue arrayValue + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) - require.True(t, nested.root.IsData()) + expectedValue = append(expectedValue, v) + } + + err = array.Append(childArray) + require.NoError(t, err) - nestedArrays[i] = nested + expectedValues[i] = expectedValue } - typeInfo := testTypeInfo{42} + require.True(t, array.root.IsData()) - array, err := NewArray(storage, address, typeInfo) + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + for j := childArraySize; j < mutatedChildArraySize; j++ { + v := Uint64Value(i + j) + + err := childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues = append(expectedChildArrayValues, v) + } + + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) + + expectedValues[i] = expectedChildArrayValues + + i++ + + return true, nil + }) require.NoError(t, err) + require.Equal(t, arraySize, i) - for _, a := range nestedArrays { - err := array.Append(a) - require.NoError(t, err) - } + require.True(t, array.root.IsData()) - verifyArray(t, storage, typeInfo, address, array, nestedArrays, false) + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) - t.Run("big array", func(t *testing.T) { + 
t.Run("uninline inlined container, root is metadata slab, merge slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - const arraySize = 4096 + const ( + arraySize = 10 + childArraySize = 10 + mutatedChildArraySize = 50 + ) - nestedTypeInfo := testTypeInfo{43} + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([]Value, arraySize) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - nested, err := NewArray(storage, address, nestedTypeInfo) + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - for i := uint64(0); i < 40; i++ { - err := nested.Append(Uint64Value(math.MaxUint64)) - require.NoError(t, err) - } - - require.False(t, nested.root.IsData()) + var expectedValue arrayValue - values[i] = nested - } + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) - typeInfo := testTypeInfo{42} + expectedValue = append(expectedValue, v) + } - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - for _, a := range values { - err := array.Append(a) + err = array.Append(childArray) require.NoError(t, err) - } - - verifyArray(t, storage, typeInfo, address, array, values, true) - }) - t.Run("small map", func(t *testing.T) { + expectedValues[i] = expectedValue + } - const arraySize = 4096 + require.False(t, array.root.IsData()) - nestedTypeInfo := testTypeInfo{43} + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) - storage := newTestPersistentStorage(t) + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + 
for j := childArraySize; j < mutatedChildArraySize; j++ { + v := Uint64Value(i + j) - nestedMaps := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - nested, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo) - require.NoError(t, err) + err := childArray.Append(v) + require.NoError(t, err) - storable, err := nested.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*2)) - require.NoError(t, err) - require.Nil(t, storable) + expectedChildArrayValues = append(expectedChildArrayValues, v) + } - require.True(t, nested.root.IsData()) + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) - nestedMaps[i] = nested - } + expectedValues[i] = expectedChildArrayValues - typeInfo := testTypeInfo{42} + i++ - array, err := NewArray(storage, address, typeInfo) + return true, nil + }) require.NoError(t, err) + require.Equal(t, arraySize, i) + require.True(t, array.root.IsData()) - for _, a := range nestedMaps { - err := array.Append(a) - require.NoError(t, err) - } - - verifyArray(t, storage, typeInfo, address, array, nestedMaps, false) + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) - t.Run("big map", func(t *testing.T) { + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - const arraySize = 4096 + const ( + arraySize = 2 + childArraySize = 50 + mutatedChildArraySize = 1 + ) - nestedTypeInfo := testTypeInfo{43} + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([]Value, arraySize) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { - nested, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo) + childArray, err := NewArray(storage, 
address, typeInfo) require.NoError(t, err) - for i := uint64(0); i < 25; i++ { - storable, err := nested.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*2)) + var expectedValue arrayValue + + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) require.NoError(t, err) - require.Nil(t, storable) + + expectedValue = append(expectedValue, v) } - require.False(t, nested.root.IsData()) + err = array.Append(childArray) + require.NoError(t, err) - values[i] = nested + expectedValues[i] = expectedValue } - typeInfo := testTypeInfo{42} - - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - for _, a := range values { - err := array.Append(a) - require.NoError(t, err) - } + require.True(t, array.root.IsData()) - verifyArray(t, storage, typeInfo, address, array, values, true) - }) -} + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) -func TestArrayDecodeV0(t *testing.T) { + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - SetThreshold(256) - defer SetThreshold(1024) + for j := childArraySize - 1; j > mutatedChildArraySize-1; j-- { + existingStorable, err := childArray.Remove(uint64(j)) + require.NoError(t, err) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - arraySlabID := SlabID{ - address: address, - index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}, - } + value, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, Uint64Value(i+j), value) + } - slabData := map[SlabID][]byte{ - arraySlabID: { - // extra data - // version - 0x00, - // extra data flag - 0x80, - // array of extra data - 0x81, - // type info - 0x18, 0x2a, + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + 
require.True(t, childArray.Inlined()) - // version - 0x00, - // array data slab flag - 0x80, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x00, - }, - } + expectedValues[i] = expectedChildArrayValues[:1] - // Decode data to new storage - storage := newTestPersistentStorageWithData(t, slabData) + i++ - // Test new array from storage - array, err := NewArrayWithRootID(storage, arraySlabID) + return true, nil + }) require.NoError(t, err) + require.Equal(t, arraySize, i) + + require.True(t, array.root.IsData()) - verifyEmptyArray(t, storage, typeInfo, address, array) + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) - t.Run("dataslab as root", func(t *testing.T) { - typeInfo := testTypeInfo{42} + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const ( + arraySize = 4 + childArraySize = 50 + mutatedChildArraySize = 25 + ) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - arraySlabID := SlabID{ - address: address, - index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}, - } - values := []Value{ - Uint64Value(0), - } + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - slabData := map[SlabID][]byte{ - arraySlabID: { - // extra data - // version - 0x00, - // extra data flag - 0x80, - // array of extra data - 0x81, - // type info - 0x18, 0x2a, + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // version - 0x00, - // array data slab flag - 0x80, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, - }, + var expectedValue arrayValue + + for j := i; j < i+childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedValue = 
append(expectedValue, v) + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = expectedValue } - // Decode data to new storage - storage := newTestPersistentStorageWithData(t, slabData) + require.True(t, array.root.IsData()) - // Test new array from storage - array, err := NewArrayWithRootID(storage, arraySlabID) + i := 0 + err = array.Iterate(func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(childArraySize), childArray.Count()) + require.False(t, childArray.Inlined()) + + expectedChildArrayValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + for j := childArraySize - 1; j >= mutatedChildArraySize; j-- { + existingStorable, err := childArray.Remove(uint64(j)) + require.NoError(t, err) + + value, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, Uint64Value(i+j), value) + } + + require.Equal(t, uint64(mutatedChildArraySize), childArray.Count()) + require.True(t, childArray.Inlined()) + + expectedValues[i] = expectedChildArrayValues[:mutatedChildArraySize] + + i++ + + return true, nil + }) require.NoError(t, err) + require.Equal(t, arraySize, i) + + require.False(t, array.root.IsData()) - verifyArray(t, storage, typeInfo, address, array, values, false) + testArray(t, storage, typeInfo, address, array, expectedValues, false) }) - t.Run("metadataslab as root", func(t *testing.T) { - storage := newTestBasicStorage(t) - typeInfo := testTypeInfo{42} + t.Run("stop", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - arraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - arrayDataSlabID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - arrayDataSlabID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - nestedArraySlabID := SlabID{address: address, index: SlabIndex{0, 
0, 0, 0, 0, 0, 0, 4}} + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - const arraySize = 20 - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize-1; i++ { - values[i] = NewStringValue(strings.Repeat("a", 22)) + const count = 10 + for i := uint64(0); i < count; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) } - typeInfo2 := testTypeInfo{43} - - nestedArray, err := NewArray(storage, address, typeInfo2) - nestedArray.root.SetSlabID(nestedArraySlabID) + i := 0 + err = array.Iterate(func(_ Value) (bool, error) { + if i == count/2 { + return false, nil + } + i++ + return true, nil + }) require.NoError(t, err) + require.Equal(t, count/2, i) + }) + + t.Run("error", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - err = nestedArray.Append(Uint64Value(0)) + array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values[arraySize-1] = nestedArray + const count = 10 + for i := uint64(0); i < count; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } - slabData := map[SlabID][]byte{ - // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] - arraySlabID: { - // extra data - // version - 0x00, - // extra data flag - 0x81, - // array of extra data - 0x81, - // type info - 0x18, 0x2a, + testErr := errors.New("test") - // version - 0x00, - // array meta data slab flag - 0x81, - // child header count - 0x00, 0x02, - // child header 1 (slab id, count, size) - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x09, - 0x00, 0x00, 0x00, 0xe4, - // child header 2 - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x0b, - 0x00, 0x00, 0x01, 0x0e, - }, + i := 0 + err = array.Iterate(func(_ Value) (bool, error) { + if i == count/2 
{ + return false, testErr + } + i++ + return true, nil + }) + // err is testErr wrapped in ExternalError. + require.Equal(t, 1, errorCategorizationCount(err)) + var externalError *ExternalError + require.ErrorAs(t, err, &externalError) + require.Equal(t, testErr, externalError.Unwrap()) - // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] - arrayDataSlabID1: { - // version - 0x00, - // array data slab flag - 0x00, - // next slab id - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x09, - // CBOR encoded array elements - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - }, + require.Equal(t, count/2, i) + }) +} - // (data slab) next: 0, 
data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] - arrayDataSlabID2: { - // version - 0x00, - // array data slab flag - 0x40, - // next slab id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x0b, - // CBOR encoded array elements - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, +func testArrayIterateRange(t *testing.T, array *Array, values []Value) { + var i uint64 + var err error + var sliceOutOfBoundsError 
*SliceOutOfBoundsError + var invalidSliceIndexError *InvalidSliceIndexError - // (data slab) next: 0, data: [0] - nestedArraySlabID: { - // extra data - // version - 0x00, - // extra data flag - 0x80, - // array of extra data - 0x81, - // type info - 0x18, 0x2b, + count := array.Count() + + // If startIndex > count, IterateRange returns SliceOutOfBoundsError + err = array.IterateReadOnlyRange(count+1, count+1, func(v Value) (bool, error) { + i++ + return true, nil + }) + require.Equal(t, 1, errorCategorizationCount(err)) + + var userError *UserError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &sliceOutOfBoundsError) + require.ErrorAs(t, userError, &sliceOutOfBoundsError) + require.Equal(t, uint64(0), i) + + // If endIndex > count, IterateRange returns SliceOutOfBoundsError + err = array.IterateReadOnlyRange(0, count+1, func(v Value) (bool, error) { + i++ + return true, nil + }) + require.Equal(t, 1, errorCategorizationCount(err)) + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &sliceOutOfBoundsError) + require.ErrorAs(t, userError, &sliceOutOfBoundsError) + require.Equal(t, uint64(0), i) + + // If startIndex > endIndex, IterateRange returns InvalidSliceIndexError + if count > 0 { + err = array.IterateReadOnlyRange(1, 0, func(v Value) (bool, error) { + i++ + return true, nil + }) + require.Equal(t, 1, errorCategorizationCount(err)) + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &invalidSliceIndexError) + require.ErrorAs(t, userError, &invalidSliceIndexError) + require.Equal(t, uint64(0), i) + } + + // IterateRange returns no error and iteration function is called on sliced array + for startIndex := uint64(0); startIndex <= count; startIndex++ { + for endIndex := startIndex; endIndex <= count; endIndex++ { + i = uint64(0) + err = array.IterateReadOnlyRange(startIndex, endIndex, func(v Value) (bool, error) { + valueEqual(t, v, values[int(startIndex+i)]) + i++ + return true, nil + }) + require.NoError(t, err) + 
require.Equal(t, endIndex-startIndex, i) + } + } +} + +func TestReadOnlyArrayIterateRange(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + testArrayIterateRange(t, array, []Value{}) + }) + + t.Run("dataslab as root", func(t *testing.T) { + const arraySize = 10 + + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + value := Uint64Value(i) + values[i] = value + err := array.Append(value) + require.NoError(t, err) + } + + testArrayIterateRange(t, array, values) + }) + + t.Run("metadataslab as root", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 1024 + + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + value := Uint64Value(i) + values[i] = value + err := array.Append(value) + require.NoError(t, err) + } + + testArrayIterateRange(t, array, values) + }) + + t.Run("stop", func(t *testing.T) { + const arraySize = 10 + + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + i := uint64(0) + startIndex := uint64(1) + endIndex := uint64(5) + count := endIndex - startIndex + err = array.IterateReadOnlyRange(startIndex, endIndex, func(_ Value) (bool, error) { + if i == count/2 { + return false, nil + } + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, count/2, i) + }) + + t.Run("error", func(t *testing.T) { + storage := 
newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 10 + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + testErr := errors.New("test") + + i := uint64(0) + startIndex := uint64(1) + endIndex := uint64(5) + count := endIndex - startIndex + err = array.IterateReadOnlyRange(startIndex, endIndex, func(_ Value) (bool, error) { + if i == count/2 { + return false, testErr + } + i++ + return true, nil + }) + // err is testErr wrapped in ExternalError. + require.Equal(t, 1, errorCategorizationCount(err)) + var externalError *ExternalError + require.ErrorAs(t, err, &externalError) + require.Equal(t, testErr, externalError.Unwrap()) + require.Equal(t, count/2, i) + }) +} + +func TestMutableArrayIterateRange(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + i := 0 + err = array.IterateRange(0, 0, func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + 
require.True(t, array.root.IsData()) + + sizeBeforeMutation := array.root.Header().size + + i := 0 + startIndex := uint64(1) + endIndex := array.Count() - 2 + newElement := Uint64Value(0) + err = array.IterateRange(startIndex, endIndex, func(v Value) (bool, error) { + childArray, ok := v.(*Array) + require.True(t, ok) + require.Equal(t, uint64(1), childArray.Count()) + require.True(t, childArray.Inlined()) + + err := childArray.Append(newElement) + require.NoError(t, err) + + index := int(startIndex) + i + expectedChildArrayValues, ok := expectedValues[index].(arrayValue) + require.True(t, ok) + + expectedChildArrayValues = append(expectedChildArrayValues, newElement) + expectedValues[index] = expectedChildArrayValues + + i++ + + require.Equal(t, array.root.Header().size, sizeBeforeMutation+uint32(i)*newElement.ByteSize()) + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, endIndex-startIndex, uint64(i)) + require.True(t, array.root.IsData()) + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("stop", func(t *testing.T) { + const arraySize = 10 + + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + i := uint64(0) + startIndex := uint64(1) + endIndex := uint64(5) + count := endIndex - startIndex + err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + if i == count/2 { + return false, nil + } + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, count/2, i) + }) + + t.Run("error", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 10 + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + testErr := 
errors.New("test") + + i := uint64(0) + startIndex := uint64(1) + endIndex := uint64(5) + count := endIndex - startIndex + err = array.IterateRange(startIndex, endIndex, func(_ Value) (bool, error) { + if i == count/2 { + return false, testErr + } + i++ + return true, nil + }) + // err is testErr wrapped in ExternalError. + require.Equal(t, 1, errorCategorizationCount(err)) + var externalError *ExternalError + require.ErrorAs(t, err, &externalError) + require.Equal(t, testErr, externalError.Unwrap()) + require.Equal(t, count/2, i) + }) +} + +func TestArrayRootSlabID(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + savedRootID := array.SlabID() + require.NotEqual(t, SlabIDUndefined, savedRootID) + + // Append elements + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + require.Equal(t, savedRootID, array.SlabID()) + } + + require.True(t, typeInfoComparator(typeInfo, array.Type())) + require.Equal(t, address, array.Address()) + require.Equal(t, uint64(arraySize), array.Count()) + + // Remove elements + for i := uint64(0); i < arraySize; i++ { + storable, err := array.Remove(0) + require.NoError(t, err) + require.Equal(t, Uint64Value(i), storable) + require.Equal(t, savedRootID, array.SlabID()) + } + + require.True(t, typeInfoComparator(typeInfo, array.Type())) + require.Equal(t, address, array.Address()) + require.Equal(t, uint64(0), array.Count()) + require.Equal(t, savedRootID, array.SlabID()) +} + +func TestArraySetRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, 
err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + for i := uint64(0); i < arraySize; i++ { + oldValue := values[i] + newValue := randomValue(r, int(maxInlineArrayElementSize)) + values[i] = newValue + + existingStorable, err := array.Set(i, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, oldValue, existingValue) + } + + testArray(t, storage, typeInfo, address, array, values, false) +} + +func TestArrayInsertRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("insert-first", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := randomValue(r, int(maxInlineArrayElementSize)) + values[arraySize-i-1] = v + + err := array.Insert(0, v) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + }) + + t.Run("insert-last", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := randomValue(r, int(maxInlineArrayElementSize)) + values[i] = v + + err := array.Insert(i, v) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + }) + + t.Run("insert-random", func(t *testing.T) { + 
+ const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + k := r.Intn(int(i) + 1) + v := randomValue(r, int(maxInlineArrayElementSize)) + + copy(values[k+1:], values[k:]) + values[k] = v + + err := array.Insert(uint64(k), v) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + }) +} + +func TestArrayRemoveRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + // Insert n random values into array + for i := uint64(0); i < arraySize; i++ { + v := randomValue(r, int(maxInlineArrayElementSize)) + values[i] = v + + err := array.Insert(i, v) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + + // Remove n elements at random index + for i := uint64(0); i < arraySize; i++ { + k := r.Intn(int(array.Count())) + + existingStorable, err := array.Remove(uint64(k)) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, values[k], existingValue) + + copy(values[k:], values[k+1:]) + values = values[:len(values)-1] + + if id, ok := existingStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + } + + testEmptyArray(t, storage, typeInfo, address, array) +} + +func testArrayAppendSetInsertRemoveRandomValues( + t *testing.T, + r *rand.Rand, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address 
Address, + opCount int, +) (*Array, []Value) { + const ( + ArrayAppendOp = iota + ArrayInsertOp + ArraySetOp + ArrayRemoveOp + MaxArrayOp + ) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, 0, opCount) + for i := 0; i < opCount; i++ { + + var nextOp int + + for { + nextOp = r.Intn(MaxArrayOp) + + if array.Count() > 0 || (nextOp != ArrayRemoveOp && nextOp != ArraySetOp) { + break + } + } + + switch nextOp { + + case ArrayAppendOp: + v := randomValue(r, int(maxInlineArrayElementSize)) + values = append(values, v) + + err := array.Append(v) + require.NoError(t, err) + + case ArraySetOp: + k := r.Intn(int(array.Count())) + v := randomValue(r, int(maxInlineArrayElementSize)) + + oldV := values[k] + + values[k] = v + + existingStorable, err := array.Set(uint64(k), v) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, oldV, existingValue) + + if id, ok := existingStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + case ArrayInsertOp: + k := r.Intn(int(array.Count() + 1)) + v := randomValue(r, int(maxInlineArrayElementSize)) + + if k == int(array.Count()) { + values = append(values, v) + } else { + values = append(values, nil) + copy(values[k+1:], values[k:]) + values[k] = v + } + + err := array.Insert(uint64(k), v) + require.NoError(t, err) + + case ArrayRemoveOp: + k := r.Intn(int(array.Count())) + + existingStorable, err := array.Remove(uint64(k)) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, values[k], existingValue) + + copy(values[k:], values[k+1:]) + values = values[:len(values)-1] + + if id, ok := existingStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + } + + require.Equal(t, uint64(len(values)), array.Count()) + require.True(t, 
typeInfoComparator(typeInfo, array.Type())) + require.Equal(t, address, array.Address()) + } + + return array, values +} + +func TestArrayAppendSetInsertRemoveRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + const opCount = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + testArray(t, storage, typeInfo, address, array, values, false) +} + +func TestArrayWithChildArrayMap(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small array", func(t *testing.T) { + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // Create child arrays with 1 element. + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) + + v := Uint64Value(i) + + err = childArray.Append(v) + require.NoError(t, err) + + require.True(t, childArray.root.IsData()) + require.False(t, childArray.Inlined()) + + err = array.Append(childArray) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + expectedValues[i] = arrayValue{v} + } + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("big array", func(t *testing.T) { + + const arraySize = 4096 + const childArraySize = 40 + + typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // Create child arrays with 40 element. 
+ expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) + + expectedChildArrayValues := make([]Value, childArraySize) + for i := uint64(0); i < childArraySize; i++ { + v := Uint64Value(math.MaxUint64) + + err := childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues[i] = v + } + + require.False(t, childArray.root.IsData()) + + err = array.Append(childArray) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + expectedValues[i] = arrayValue(expectedChildArrayValues) + } + + testArray(t, storage, typeInfo, address, array, expectedValues, true) + }) + + t.Run("small map", func(t *testing.T) { + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + childArayTypeInfo := testTypeInfo{43} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childArayTypeInfo) + require.NoError(t, err) + + k := Uint64Value(i) + v := Uint64Value(i * 2) + storable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + require.True(t, childMap.root.IsData()) + + err = array.Append(childMap) + require.NoError(t, err) + + expectedValues[i] = mapValue{k: v} + } + + testArray(t, storage, typeInfo, address, array, expectedValues, false) + }) + + t.Run("big map", func(t *testing.T) { + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + nestedTypeInfo := testTypeInfo{43} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := 
uint64(0); i < arraySize; i++ { + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), nestedTypeInfo) + require.NoError(t, err) + + expectedChildMapValues := mapValue{} + for i := uint64(0); i < 25; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + storable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + expectedChildMapValues[k] = v + } + + require.False(t, childMap.root.IsData()) + + err = array.Append(childMap) + require.NoError(t, err) + + expectedValues[i] = expectedChildMapValues + } + + testArray(t, storage, typeInfo, address, array, expectedValues, true) + }) +} + +func TestArrayDecodeV0(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + arraySlabID := SlabID{ + address: address, + index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}, + } + + slabData := map[SlabID][]byte{ + arraySlabID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x00, + }, + } + + // Decode data to new storage + storage := newTestPersistentStorageWithData(t, slabData) + + // Test new array from storage + array, err := NewArrayWithRootID(storage, arraySlabID) + require.NoError(t, err) + + testEmptyArrayV0(t, storage, typeInfo, address, array) + }) + + t.Run("dataslab as root", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + arraySlabID := SlabID{ + address: address, + index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}, + } + + values := []Value{ + Uint64Value(0), + } + + slabData := map[SlabID][]byte{ + arraySlabID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 
0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + // Decode data to new storage + storage := newTestPersistentStorageWithData(t, slabData) + + // Test new array from storage + array, err := NewArrayWithRootID(storage, arraySlabID) + require.NoError(t, err) + + testArrayV0(t, storage, typeInfo, address, array, values, false) + }) + + t.Run("metadataslab as root", func(t *testing.T) { + storage := newTestBasicStorage(t) + typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} + + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + arraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + arrayDataSlabID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + arrayDataSlabID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + childArraySlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + const arraySize = 20 + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + values[i] = NewStringValue(strings.Repeat("a", 22)) + } + + childArray, err := NewArray(storage, address, childTypeInfo) + childArray.root.SetSlabID(childArraySlabID) + require.NoError(t, err) + + v := Uint64Value(0) + err = childArray.Append(v) + require.NoError(t, err) + + values[arraySize-1] = arrayValue{v} + + slabData := map[SlabID][]byte{ + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + arraySlabID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (slab id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 
0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + arrayDataSlabID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] + arrayDataSlabID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next slab id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // (data slab) next: 0, data: [0] + childArraySlabID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type 
info + 0x18, 0x2b, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, slabData) + + // Test new array from storage2 + array, err := NewArrayWithRootID(storage2, arraySlabID) + require.NoError(t, err) + + testArrayV0(t, storage2, typeInfo, address, array, values, false) + }) +} + +func TestArrayEncodeDecode(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedData := []byte{ + // version + 0x10, + // flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x00, + } + + slabData, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, 1, len(slabData)) + require.Equal(t, expectedData, slabData[array.SlabID()]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, slabData) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testEmptyArray(t, storage2, typeInfo, address, array2) + }) + + t.Run("root dataslab", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + v := Uint64Value(0) + values := []Value{v} + err = array.Append(v) + require.NoError(t, err) + + expectedData := []byte{ + // version + 0x10, + // flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // CBOR encoded 
array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + } + + slabData, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, 1, len(slabData)) + require.Equal(t, expectedData, slabData[array.SlabID()]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, slabData) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, values, false) + }) + + t.Run("root metadata slab", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 18 + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + id3: { + // version + 0x10, + // array data slab flag + 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, values, false) + }) + + // Same type info is 
reused. + t.Run("root data slab, inlined child array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + childTypeInfo := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) + + err = childArray.Append(v) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // inlined extra data + 0x82, + // element 0: array of type info + 0x80, + // element 1: array of extra data + 0x81, + // array extra data + 0xd8, 0xf7, + 0x81, + // array type info ref + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, 
typeInfo, address, array2, expectedValues, false) + }) + + // Different type info are encoded. + t.Run("root data slab, inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + err = childArray.Append(v) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{v} + } + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + + 
// Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + // Same type info is reused. + t.Run("root data slab, multiple levels of inlined array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + err = gchildArray.Append(v) + require.NoError(t, err) + + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{ + arrayValue{ + v, + }, + } + } + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("root data slab, multiple levels of inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + typeInfo4 := testTypeInfo{45} + typeInfo5 := testTypeInfo{46} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 2 + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + + var ti TypeInfo + if i == 0 { + ti = typeInfo2 + } else { + ti = typeInfo4 + } + gchildArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + err = gchildArray.Append(v) + require.NoError(t, err) + + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo5 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue{ + arrayValue{ + v, + }, + } + } + + id1 := SlabID{address: address, index: 
SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x84, + // typeInfo3 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // typeInfo2 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // typeInfo5 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2e, + // typeInfo4 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2d, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x02, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x03, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("root metadata slab, inlined array of same type", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + expectedValues := 
make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + for i := 0; i < 2; i++ { + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + v := Uint64Value(i) + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{v}) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + 
require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("root metadata slab, inlined array of different type", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + expectedValues := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + for i := 0; i < 2; i++ { + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } + + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) + + v := Uint64Value(i) + + err = childArray.Append(v) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{v}) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 
0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 
0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... [0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x82, + // element 0: array of inlined extra data + 0x80, + // element 1: array of inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, + } + 
+ m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("has pointers", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + expectedValues := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + const childArraySize = 5 + + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + expectedChildArrayValues := make([]Value, childArraySize) + for i := 0; i < childArraySize; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = childArray.Append(v) + require.NoError(t, err) + expectedChildArrayValues[i] = v + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue(expectedChildArrayValues)) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(5), childArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + // Expected serialized 
slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 
0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] + id3: { + // version (no next slab ID, no inlined slabs) + 0x10, + // array data slab flag + 0x40, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 
0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // (data slab) next: 0, data: [bbbbbbbbbbbbbbbbbbbbbb ...] + id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) + + t.Run("has pointers in inlined slab", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + 
expectedValues := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, v) + } + + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) + + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + const gchildArraySize = 5 + + expectedGChildArrayValues := make([]Value, gchildArraySize) + for i := 0; i < gchildArraySize; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + + err = gchildArray.Append(v) + require.NoError(t, err) + + expectedGChildArrayValues[i] = v + } + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{ + arrayValue(expectedGChildArrayValues), + }) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(5), gchildArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 5}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:287 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x1f, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[SlabID(...)]] + id3: { + // version (no next slab ID, has inlined slabs) + 0x11, + // array data slab flag (has pointer) + 0x40, + + // inlined extra data + 0x82, + // element 0: array of type info + 0x80, + // element 1: array of extra data + 0x81, + // type info + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x05, + }, + + // (data slab) data: [bbbbbbbbbbbbbbbbbbbbbb ...] + id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, expectedValues, false) + }) +} + +func TestArrayEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + const opCount = 8192 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + + testArray(t, 
storage, typeInfo, address, array, values, false) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + testArray(t, storage2, typeInfo, address, array2, values, false) +} + +func TestEmptyArray(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestBasicStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := array.Get(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("set", func(t *testing.T) { + s, err := array.Set(0, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("insert", func(t *testing.T) { + err := array.Insert(1, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + }) + + t.Run("remove", func(t *testing.T) { + s, err := array.Remove(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, 
&indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("readonly iterate", func(t *testing.T) { + i := uint64(0) + err := array.IterateReadOnly(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + }) + + t.Run("iterate", func(t *testing.T) { + i := uint64(0) + err := array.Iterate(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + }) + + t.Run("count", func(t *testing.T) { + count := array.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, array.Type())) + }) + + // TestArrayEncodeDecode/empty tests empty array encoding and decoding +} + +func TestArrayStringElement(t *testing.T) { + + t.Parallel() + + t.Run("inline", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize - 3) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(0), stats.StorableSlabCount) + }) + + t.Run("external slab", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize + 512) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := 
newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(arraySize), stats.StorableSlabCount) + }) +} + +func TestArrayStoredValue(t *testing.T) { + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + rootID := array.SlabID() + + slabIterator, err := storage.SlabIterator() + require.NoError(t, err) + + for { + id, slab := slabIterator() + + if id == SlabIDUndefined { + break + } + + value, err := slab.StoredValue(storage) + + if id == rootID { + require.NoError(t, err) + + array2, ok := value.(*Array) + require.True(t, ok) + + testArray(t, storage, typeInfo, address, array2, values, false) + } else { + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var notValueError *NotValueError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, ¬ValueError) + require.ErrorAs(t, fatalError, ¬ValueError) + require.Nil(t, value) + } + } +} + +func TestArrayPopIterate(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + i := uint64(0) + err = array.PopIterate(func(v Storable) { + i++ + }) + 
require.NoError(t, err) + require.Equal(t, uint64(0), i) + + testEmptyArray(t, storage, typeInfo, address, array) + }) + + t.Run("root-dataslab", func(t *testing.T) { + + const arraySize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + i := 0 + err = array.PopIterate(func(v Storable) { + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, values[arraySize-i-1], vv) + i++ + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + testEmptyArray(t, storage, typeInfo, address, array) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + i := 0 + err = array.PopIterate(func(v Storable) { + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, values[arraySize-i-1], vv) + i++ + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + testEmptyArray(t, storage, typeInfo, address, array) + }) +} + +func TestArrayFromBatchData(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), array.Count()) + + iter, err := array.ReadOnlyIterator() + 
require.NoError(t, err) + + // Create a new array with new storage, new address, and original array's elements. + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + testEmptyArray(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + + const arraySize = 10 + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + // Create a new array with new storage, new address, and original array's elements. 
+ address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) + values = append(values, v) + + err = array.Insert(0, v) + require.NoError(t, err) + + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(36), array.Count()) + + iter, err 
:= array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + } + + v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) + values = append(values, nil) + copy(values[25+1:], values[25:]) + values[25] = v + + err = array.Insert(25, v) + require.NoError(t, err) + + require.Equal(t, uint64(36), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := randomValue(r, 
int(maxInlineArrayElementSize)) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + iter, err := array.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + testArray(t, storage, typeInfo, address, copied, values, false) + }) +} + 
+func TestArrayNestedStorables(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + + const arraySize = 1024 * 4 + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := strings.Repeat("a", int(i)) + v := SomeValue{Value: NewStringValue(s)} + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + testArray(t, storage, typeInfo, address, array, values, true) +} + +func TestArrayMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var values []Value + for i := 0; i < 2; i++ { + // String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead. + v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3))) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + // Size of root data slab with two elements of max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab), and minus 1 byte + // (for rounding when computing max inline array element size). 
+ require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size)) + + testArray(t, storage, typeInfo, address, array, values, false) +} + +func TestArrayString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5]` + require.Equal(t, want, array.String()) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]` + require.Equal(t, want, array.String()) + }) +} + +func TestArraySlabDump(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 
1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]", + } + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]", + "level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]", + "level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("overflow", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize)))) + require.NoError(t, err) + + want := []string{ + "level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 
storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} + +func errorCategorizationCount(err error) int { + var fatalError *FatalError + var userError *UserError + var externalError *ExternalError + + count := 0 + if errors.As(err, &fatalError) { + count++ + } + if errors.As(err, &userError) { + count++ + } + if errors.As(err, &externalError) { + count++ + } + return count +} + +func TestArrayLoadedValueIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, nil) + }) + + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + 
t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[i+1:] + testArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[:i] + testArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + 
require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element in the middle + unloadValueIndex := 1 + + slabID := childSlabIDs[unloadValueIndex] + + err := storage.Remove(slabID) + require.NoError(t, err) + + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + i := 0 + err := array.IterateReadOnlyLoadedValues(func(v Value) (bool, error) { + // At this point, iterator returned first element (v). + + // Remove all other nested composite elements (except first element) from storage. + for _, slabID := range childSlabIDs[1:] { + err := storage.Remove(slabID) + require.NoError(t, err) + } + + require.Equal(t, 0, i) + valueEqual(t, values[0], v) + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. 
+ }) + + t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 3 + + // Create an array with nested composite value at specified index + for childArrayIndex := 0; childArrayIndex < arraySize; childArrayIndex++ { + storage := newTestPersistentStorage(t) + + array, values, childSlabID := createArrayWithSimpleAndChildArrayValues(t, storage, address, typeInfo, arraySize, childArrayIndex) + + // parent array: 1 root data slab + // nested composite element: 1 root data slab + require.Equal(t, 2, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element + err := storage.Remove(childSlabID) + require.NoError(t, err) + + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + require.Equal(t, 3, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values, unload composite element from front to back", func(t 
*testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(childSlabIDs); i++ { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[i+1:] + testArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element from back to front + for i := len(childSlabIDs) - 1; i >= 0; i-- { + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + expectedValues := values[:i] + testArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + 
require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite element in the middle + for _, index := range []int{4, 14} { + + slabID := childSlabIDs[index] + + err := storage.Remove(slabID) + require.NoError(t, err) + + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 20 + + // Create an array with composite value at specified index. + for childArrayIndex := 0; childArrayIndex < arraySize; childArrayIndex++ { + storage := newTestPersistentStorage(t) + + array, values, childSlabID := createArrayWithSimpleAndChildArrayValues(t, storage, address, typeInfo, arraySize, childArrayIndex) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+1, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + // Unload composite value + err := storage.Remove(childSlabID) + require.NoError(t, err) + + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload data slabs from 
front to back + for i := 0; i < len(metaDataSlab.childrenHeaders); i++ { + + childHeader := metaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload data slabs from back to front + for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + + childHeader := metaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[:len(values)-int(childHeader.count)] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + testArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + require.True(t, len(metaDataSlab.childrenHeaders) > 2) + + index := 1 + childHeader := metaDataSlab.childrenHeaders[index] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + copy(values[metaDataSlab.childrenCountSum[index-1]:], 
values[metaDataSlab.childrenCountSum[index]:]) + values = values[:array.Count()-uint64(childHeader.count)] + + testArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 250 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs + require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload non-root metadata slabs from front to back + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 250 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs + require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload non-root metadata slabs from back to front + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize 
= 500 + array, values, childSlabIDs := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + testArrayLoadedElements(t, array, values) + + r := newRand(t) + + // Unload random composite element + for len(values) > 0 { + + i := r.Intn(len(values)) + + slabID := childSlabIDs[i] + + err := storage.Remove(slabID) + require.NoError(t, err) + + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] + + copy(childSlabIDs[i:], childSlabIDs[i+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] + + testArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + testArrayLoadedElements(t, array, values) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + type slabInfo struct { + id SlabID + startIndex int + count int + } + + count := 0 + var dataSlabInfos []*slabInfo + for _, mheader := range rootMetaDataSlab.childrenHeaders { + nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) + require.True(t, ok) + + for _, h := range nonrootMetaDataSlab.childrenHeaders { + dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)} + dataSlabInfos = append(dataSlabInfos, dataSlabInfo) + count += int(h.count) + } 
+ } + + r := newRand(t) + + // Unload random data slab. + for len(dataSlabInfos) > 0 { + indexToUnload := r.Intn(len(dataSlabInfos)) + + slabInfoToUnload := dataSlabInfos[indexToUnload] + + // Update startIndex for all data slabs after indexToUnload. + for i := indexToUnload + 1; i < len(dataSlabInfos); i++ { + dataSlabInfos[i].startIndex -= slabInfoToUnload.count + } + + // Remove slabInfo to be unloaded from dataSlabInfos. + copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:]) + dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + + err := storage.Remove(slabInfoToUnload.id) + require.NoError(t, err) + + copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) + values = values[:len(values)-slabInfoToUnload.count] + + testArrayLoadedElements(t, array, values) + } + + require.Equal(t, 0, len(values)) + }) + + t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values, _ := createArrayWithChildArrays(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + testArrayLoadedElements(t, array, values) + + type slabInfo struct { + id SlabID + startIndex int + count int + children []*slabInfo + } + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + var dataSlabCount, metadataSlabCount int + nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) + for i, mheader := range rootMetaDataSlab.childrenHeaders { + + nonrootMetadataSlabInfo := &slabInfo{ + id: mheader.slabID, + startIndex: metadataSlabCount, + count: int(mheader.count), + } + metadataSlabCount += int(mheader.count) + + 
nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) + require.True(t, ok) + + children := make([]*slabInfo, len(nonrootMetadataSlab.childrenHeaders)) + for i, h := range nonrootMetadataSlab.childrenHeaders { + children[i] = &slabInfo{ + id: h.slabID, + startIndex: dataSlabCount, + count: int(h.count), + } + dataSlabCount += int(h.count) + } + + nonrootMetadataSlabInfo.children = children + nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo + } + + r := newRand(t) + + const ( + metadataSlabType int = iota + dataSlabType + maxSlabType + ) + + for len(nonrootMetadataSlabInfos) > 0 { + + var slabInfoToBeRemoved *slabInfo + var isLastSlab bool + + // Unload random metadata or data slab. + switch r.Intn(maxSlabType) { + + case metadataSlabType: + // Unload metadata slab at random index. + metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + + isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1 + + slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex] + + count := slabInfoToBeRemoved.count + + // Update startIndex for subsequence metadata and data slabs. + for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { + nonrootMetadataSlabInfos[i].startIndex -= count + + for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { + nonrootMetadataSlabInfos[i].children[j].startIndex -= count + } + } + + copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) + nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + + case dataSlabType: + // Unload data slab at randome index. 
+ metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + + metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex] + + dataSlabIndex := r.Intn(len(metaSlabInfo.children)) + + slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex] + + isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) && + (dataSlabIndex == len(metaSlabInfo.children)-1) + + count := slabInfoToBeRemoved.count + + // Update startIndex for subsequence data slabs. + for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ { + metaSlabInfo.children[i].startIndex -= count + } + + copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:]) + metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1] + + metaSlabInfo.count -= count + + // Update startIndex for all subsequence metadata slabs. + for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { + nonrootMetadataSlabInfos[i].startIndex -= count + + for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { + nonrootMetadataSlabInfos[i].children[j].startIndex -= count + } + } + + if len(metaSlabInfo.children) == 0 { + copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) + nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + } + } + + err := storage.Remove(slabInfoToBeRemoved.id) + require.NoError(t, err) + + if isLastSlab { + values = values[:slabInfoToBeRemoved.startIndex] + } else { + copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) + values = values[:len(values)-slabInfoToBeRemoved.count] + } + + testArrayLoadedElements(t, array, values) + } + + require.Equal(t, 0, len(values)) + }) +} + +func createArrayWithSimpleValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { + + // Create parent array + array, err := NewArray(storage, address, 
typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + r := rune('a') + for i := 0; i < arraySize; i++ { + values[i] = NewStringValue(strings.Repeat(string(r), 20)) + + err := array.Append(values[i]) + require.NoError(t, err) + } + + return array, values +} + +func createArrayWithChildArrays( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value, []SlabID) { + const childArraySize = 50 + + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + childSlabIDs := make([]SlabID, arraySize) + + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedChildArrayValues := make([]Value, childArraySize) + for j := 0; j < childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues[j] = v + } + + expectedValues[i] = arrayValue(expectedChildArrayValues) + childSlabIDs[i] = childArray.SlabID() + + // Append nested array to parent + err = array.Append(childArray) + require.NoError(t, err) + } + + return array, expectedValues, childSlabIDs +} + +func createArrayWithSimpleAndChildArrayValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, + compositeValueIndex int, +) (*Array, []Value, SlabID) { + const childArraySize = 50 + + require.True(t, compositeValueIndex < arraySize) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + var childSlabID SlabID + r := 'a' + for i := 0; i < arraySize; i++ { + + if compositeValueIndex == i { + // Create child array with one element + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedChildArrayValues := make([]Value, childArraySize) + for j 
:= 0; j < childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildArrayValues[j] = v + } + + err = array.Append(childArray) + require.NoError(t, err) + + expectedValues[i] = arrayValue(expectedChildArrayValues) + childSlabID = childArray.SlabID() + } else { + v := NewStringValue(strings.Repeat(string(r), 20)) + r++ + + err = array.Append(v) + require.NoError(t, err) + + expectedValues[i] = v + } + } + + return array, expectedValues, childSlabID +} + +func testArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { + i := 0 + err := array.IterateReadOnlyLoadedValues(func(v Value) (bool, error) { + require.True(t, i < len(expectedValues)) + valueEqual(t, expectedValues[i], v) + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} + +func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*ArrayMetaDataSlab); ok { + counter++ + } + } + return counter +} + +func TestArrayID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + sid := array.SlabID() + id := array.ValueID() + + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} + +func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { + const ( + arraySize = 3 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]*testMutableValue, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := newTestMutableValue(initialStorableSize) + values[i] = v + + err := array.Append(v) + 
require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + mv := values[i] + mv.updateStorableSize(mutatedStorableSize) + + existingStorable, err := array.Set(i, mv) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = VerifyArray(array, address, typeInfo, typeInfoComparator, hashInputProvider, true) + require.NoError(t, err) +} + +func TestChildArrayInlinabilityInParentArray(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("parent is root data slab, with one child array", func(t *testing.T) { + const arraySize = 1 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) + + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
+ + // Test parent slab size with 1 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. 
+ for i := 0; i < 10; i++ { + err = childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[0] = expectedChildValues + + require.False(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slab because child array is no longer inlined. + + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove elements from child array which triggers standalone array slab becomes inlined slab again. + for childArray.Count() > 0 { + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues = expectedChildValues[1:] + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) + + t.Run("parent is root data slab, with two child arrays", func(t *testing.T) { + const arraySize = 2 + + typeInfo := 
testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) + + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. + + // Test parent slab size with 2 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + children[i].array = childArray + children[i].valueID = valueID + } + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. 
+ for i := 0; i < 10; i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize += vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + expectedStoredDeltas := 1 + + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + for i, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[i] = expectedChildValues - // version - 0x00, - // array data slab flag - 0x80, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, - }, + expectedStoredDeltas++ + require.Equal(t, expectedStoredDeltas, 
getStoredDeltas(storage)) // There are more stored slab because child array is no longer inlined. + + expectedSlabID := valueIDToSlabID(childValueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + //expectedParentSize := arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + expectedParentSize -= inlinedArrayDataSlabPrefixSize + uint32(childArray.Count()-1)*vSize + expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, slabData) + // Remove one element from child array which triggers standalone array slab becomes inlined slab again. 
+ for i, child := range children { + childArray := child.array + childValueID := child.valueID - // Test new array from storage2 - array, err := NewArrayWithRootID(storage2, arraySlabID) - require.NoError(t, err) + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - verifyArray(t, storage2, typeInfo, address, array, values, false) - }) -} + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) -func TestArrayEncodeDecode(t *testing.T) { + expectedChildValues = expectedChildValues[1:] + expectedValues[i] = expectedChildValues - SetThreshold(256) - defer SetThreshold(1024) + require.True(t, childArray.Inlined()) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestBasicStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + expectedStoredDeltas-- + require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged - expectedData := []byte{ - // version - 0x10, - // flag - 0x80, + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - // extra data - // array of extra data - 0x81, - // type info - 0x18, 0x2a, + expectedParentSize -= SlabIDStorable{}.ByteSize() + expectedParentSize += expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x00, + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - slabData, err := storage.Encode() - 
require.NoError(t, err) - require.Equal(t, 1, len(slabData)) - require.Equal(t, expectedData, slabData[array.SlabID()]) + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, slabData) + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - // Test new array from storage2 - array2, err := NewArrayWithRootID(storage2, array.SlabID()) - require.NoError(t, err) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + expectedParentSize -= vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } - verifyEmptyArray(t, storage2, typeInfo, address, array2) + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - t.Run("dataslab as root", func(t *testing.T) { + t.Run("parent is root metadata slab, with four child arrays", func(t *testing.T) { + const arraySize = 4 + typeInfo := 
testTypeInfo{42} - storage := newTestBasicStorage(t) + storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - v := Uint64Value(0) - values := []Value{v} - err = array.Append(v) - require.NoError(t, err) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - expectedData := []byte{ - // version - 0x10, - // flag - 0x80, + // Test parent slab size with 4 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - // extra data - // array of extra data - 0x81, - // type info - 0x18, 0x2a, + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + children[i].array = childArray + 
children[i].valueID = valueID } - slabData, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, 1, len(slabData)) - require.Equal(t, expectedData, slabData[array.SlabID()]) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, slabData) + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. + for i := 0; i < 10; i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID - // Test new array from storage2 - array2, err := NewArrayWithRootID(storage2, array.SlabID()) - require.NoError(t, err) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. 
+ require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) + + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + for i, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.False(t, childArray.Inlined()) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[i] = expectedChildValues + + expectedSlabID := valueIDToSlabID(childValueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Parent array has one data slab and all child arrays are not inlined. + require.Equal(t, 1+arraySize, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) - verifyArray(t, storage2, typeInfo, address, array2, values, false) + // Remove one element from child array which triggers standalone array slab becomes inlined slab again. 
+ for i, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[i] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. 
+ require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) + + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for j, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) + + expectedChildValues = expectedChildValues[1:] + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + // Parent array has 1 data slab. + // All child arrays are inlined. 
+ require.Equal(t, 1, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) + + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) }) +} + +func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers child array slab to become standalone slab", func(t *testing.T) { + const arraySize = 1 - t.Run("has pointers", func(t *testing.T) { typeInfo := testTypeInfo{42} - storage := newTestBasicStorage(t) + storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Create an array with empty child array as element, which has empty child array. + parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) - const arraySize = 20 - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize-1; i++ { - v := NewStringValue(strings.Repeat("a", 22)) - values[i] = v - err := array.Append(v) - require.NoError(t, err) - } + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
- typeInfo2 := testTypeInfo{43} + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - nestedArray, err := NewArray(storage, address, typeInfo2) - require.NoError(t, err) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - err = nestedArray.Append(Uint64Value(0)) + // Get inlined child array + e, err := parentArray.Get(0) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - values[arraySize-1] = nestedArray + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - err = array.Append(nestedArray) - require.NoError(t, err) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - require.Equal(t, uint64(arraySize), array.Count()) - require.Equal(t, uint64(1), nestedArray.Count()) + // Get inlined grand child array + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) - // Expected serialized slab data with slab id - expected := map[SlabID][]byte{ + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], 
gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) - // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] - id1: { - // version - 0x10, - // flag - 0x81, + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - // extra data - // array of extra data - 0x81, - // type info - 0x18, 0x2a, + // Appending 8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. + for i := 0; i < 8; i++ { + err = gchildArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) - // child shared address - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - // child header count - 0x00, 0x02, - // child header 1 (slab index, count, size) - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x09, - 0x00, 0xe4, - // child header 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x0b, - 0x01, 0x0e, - }, + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) - // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] - id2: { - // version - 0x12, - // array data slab flag - 0x00, - // next slab id - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x09, - // CBOR encoded array elements - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - }, + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] - id3: { - // version - 0x10, - // array data slab flag - 0x40, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x0b, - // CBOR encoded array elements - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues - // (data slab) next: 0, data: [0] - id4: { - // version - 0x10, - // extra data flag - 0x80, + require.True(t, childArray.Inlined()) + require.True(t, 
gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - // extra data - // array of extra data - 0x81, - // type info - 0x18, 0x2b, + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, - }, + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - m, err := storage.Encode() + // Add one more element to grand child array which triggers inlined child array slab (NOT grand child array slab) becomes standalone slab + err = gchildArray.Append(v) require.NoError(t, err) - require.Equal(t, len(expected), len(m)) - require.Equal(t, expected[id1], m[id1]) - require.Equal(t, expected[id2], m[id2]) - require.Equal(t, expected[id3], m[id3]) - 
require.Equal(t, expected[id4], m[id4]) - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, m) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) - // Test new array from storage2 - array2, err := NewArrayWithRootID(storage2, array.SlabID()) - require.NoError(t, err) + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - verifyArray(t, storage2, typeInfo, address, array2, values, false) - }) -} + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues -func TestArrayEncodeDecodeRandomValues(t *testing.T) { + require.True(t, gchildArray.Inlined()) + require.False(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slab because child array is no longer inlined. - SetThreshold(256) - defer SetThreshold(1024) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - const opCount = 8192 + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - r := newRand(t) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - verifyArray(t, storage, typeInfo, address, array, values, false) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - // Decode data to new storage - storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // Test new array from storage2 - array2, err := NewArrayWithRootID(storage2, array.SlabID()) - require.NoError(t, err) + // Remove elements from grand child array which triggers standalone child array slab becomes inlined slab again. 
+ for gchildArray.Count() > 0 { + existingStorable, err := gchildArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - verifyArray(t, storage2, typeInfo, address, array2, values, false) -} + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) -func TestEmptyArray(t *testing.T) { + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - t.Parallel() + expectedGChildValues = expectedGChildValues[1:] + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestBasicStorage(t) + require.True(t, gchildArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged - t.Run("get", func(t *testing.T) { - s, err := array.Get(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged - t.Run("set", func(t *testing.T) { - s, err := array.Set(0, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Test inlined grand child slab size + 
expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - t.Run("insert", func(t *testing.T) { - err := array.Insert(1, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - }) + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - t.Run("remove", func(t *testing.T) { - s, err := array.Remove(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - t.Run("iterate", func(t *testing.T) { - i := uint64(0) - err := array.Iterate(func(v Value) (bool, error) { - i++ - return true, nil - }) - require.NoError(t, err) - require.Equal(t, uint64(0), i) - }) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - t.Run("count", func(t *testing.T) { - count := array.Count() - require.Equal(t, uint64(0), count) - }) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - t.Run("type", func(t 
*testing.T) { - require.True(t, typeInfoComparator(typeInfo, array.Type())) + require.Equal(t, uint64(0), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - // TestArrayEncodeDecode/empty tests empty array encoding and decoding -} + t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers grand child array slab to become standalone slab", func(t *testing.T) { + const arraySize = 1 -func TestArrayStringElement(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - t.Parallel() + // Create an array with empty child array as element, which has empty child array. + parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) - t.Run("inline", func(t *testing.T) { + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
- const arraySize = 4096 + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - r := newRand(t) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - stringSize := int(maxInlineArrayElementSize - 3) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := randStr(r, stringSize) - values[i] = NewStringValue(s) - } + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - typeInfo := testTypeInfo{42} + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - array, err := NewArray(storage, address, typeInfo) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + // Get inlined grand child array + e, err = childArray.Get(0) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(values[i]) + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 
8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. + for i := 0; i < 8; i++ { + err = gchildArray.Append(v) require.NoError(t, err) - } + require.Equal(t, uint64(i+1), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) - verifyArray(t, storage, typeInfo, address, array, values, false) + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) - stats, err := GetArrayStats(array) + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = append(expectedGChildValues, v) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= 
gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Add one more element to grand child array which triggers inlined grand child array slab (NOT child array slab) becomes standalone slab + largeValue := NewStringValue(strings.Repeat("b", 20)) + largeValueSize := largeValue.ByteSize() + err = gchildArray.Append(largeValue) require.NoError(t, err) - require.Equal(t, uint64(0), stats.StorableSlabCount) - }) - t.Run("external slab", func(t *testing.T) { + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) - const arraySize = 4096 + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - r := newRand(t) + expectedGChildValues = append(expectedGChildValues, largeValue) + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues - stringSize := int(maxInlineArrayElementSize + 512) + require.False(t, gchildArray.Inlined()) + require.True(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slab because child array is no longer inlined. - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := randStr(r, stringSize) - values[i] = NewStringValue(s) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged + + expectedSlabID := valueIDToSlabID(gValueID) + require.Equal(t, expectedSlabID, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := arrayRootDataSlabPrefixSize + uint32(gchildArray.Count()-1)*vSize + largeValueSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + expectedStandaloneSlabSize := inlinedArrayDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + expectedParentSize = arrayRootDataSlabPrefixSize + expectedStandaloneSlabSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove elements from grand child array which triggers standalone child array slab becomes inlined slab again. 
+ for gchildArray.Count() > 0 { + _, err := gchildArray.Remove(gchildArray.Count() - 1) + require.NoError(t, err) + + expectedChildValues, ok := expectedValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) + + expectedGChildValues = expectedGChildValues[:len(expectedGChildValues)-1] + expectedChildValues[0] = expectedGChildValues + expectedValues[0] = expectedChildValues + + require.True(t, gchildArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } + require.Equal(t, uint64(0), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(arraySize), 
parentArray.Count()) + }) + + t.Run("parent is root data slab, two child array, one grand child array each, changes to child array triggers child array slab to become standalone slab", func(t *testing.T) { + const arraySize = 2 + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - typeInfo := testTypeInfo{42} - array, err := NewArray(storage, address, typeInfo) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(values[i]) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - } - - verifyArray(t, storage, typeInfo, address, array, values, false) - stats, err := GetArrayStats(array) - require.NoError(t, err) - require.Equal(t, uint64(arraySize), stats.StorableSlabCount) - }) -} + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) -func TestArrayStoredValue(t *testing.T) { + // Append element to grand child array + err = gchild.Append(v) + require.NoError(t, err) - const arraySize = 4096 + // Append grand child array to child array + err = child.Append(gchild) + require.NoError(t, err) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + // Append child array to parent + err = parentArray.Append(child) + require.NoError(t, err) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedValues[i] = arrayValue{arrayValue{v}} + } - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) - } + require.Equal(t, uint64(arraySize), 
parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - rootID := array.SlabID() + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + vSize*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - slabIterator, err := storage.SlabIterator() - require.NoError(t, err) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - for { - id, slab := slabIterator() + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - if id == SlabIDUndefined { - break + type arrayInfo struct { + array *Array + valueID ValueID + child *arrayInfo } - value, err := slab.StoredValue(storage) + children := make([]arrayInfo, arraySize) - if id == rootID { + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - array2, ok := value.(*Array) + childArray, ok := e.(*Array) require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - verifyArray(t, storage, typeInfo, address, array2, values, false) - } else { - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var notValueError *NotValueError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, ¬ValueError) - require.ErrorAs(t, fatalError, ¬ValueError) - require.Nil(t, value) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + gchildArray, ok := e.(*Array) + require.True(t, ok) + 
require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + children[i] = arrayInfo{ + array: childArray, + valueID: valueID, + child: &arrayInfo{array: gchildArray, valueID: gValueID}, + } } - } -} -func TestArrayPopIterate(t *testing.T) { + // Appending 7 elements to child array so that inlined child array reaches max inlined size as array element. + for i := 0; i < 7; i++ { + for j, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+2), childArray.Count()) + + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + expectedChildValues = append(expectedChildValues, v) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedValues[j] = expectedChildValues - i := uint64(0) - err = array.PopIterate(func(v Storable) { - i++ - }) - require.NoError(t, err) - require.Equal(t, uint64(0), i) + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyEmptyArray(t, storage, typeInfo, address, array) - }) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - t.Run("root-dataslab", func(t *testing.T) { + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + 
require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - const arraySize = 10 + // Test inlined grand child slab size (1 element, unchanged) + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(i+1) + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Test parent slab size + expectedParentSize += vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } } - i := 0 - err = array.PopIterate(func(v Storable) { - vv, err := v.StoredValue(storage) + // Add one more element to child array which triggers inlined child array slab (NOT grand child array slab) becomes standalone slab + for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID + + err = childArray.Append(v) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) - i++ - }) - require.NoError(t, err) - require.Equal(t, arraySize, i) - verifyEmptyArray(t, 
storage, typeInfo, address, array) - }) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + expectedChildValues = append(expectedChildValues, v) - const arraySize = 4096 + expectedValues[i] = expectedChildValues - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.True(t, gchildArray.Inlined()) + require.False(t, childArray.Inlined()) + require.Equal(t, 2+i, getStoredDeltas(storage)) // There are >1 stored slab because child array is no longer inlined. - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1) + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, 
uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - i := 0 - err = array.PopIterate(func(v Storable) { - vv, err := v.StoredValue(storage) + require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slab because child array is no longer inlined. + + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(SlabID{}).ByteSize()*2 + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + // Remove one elements from each child array to trigger child arrays being inlined again. + expectedParentSize = arrayRootDataSlabPrefixSize + + for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID + + _, err = childArray.Remove(childArray.Count() - 1) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) - i++ - }) - require.NoError(t, err) - require.Equal(t, arraySize, i) - verifyEmptyArray(t, storage, typeInfo, address, array) - }) -} + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) -func TestArrayFromBatchData(t *testing.T) { + expectedChildValues = expectedChildValues[:len(expectedChildValues)-1] - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} + expectedValues[i] = expectedChildValues - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) - require.Equal(t, uint64(0), array.Count()) + require.True(t, gchildArray.Inlined()) + require.True(t, childArray.Inlined()) + require.Equal(t, 2-i, getStoredDeltas(storage)) - iter, err := array.Iterator() - require.NoError(t, err) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - // Create a new array with new storage, new address, and original array's elements. - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), array.SlabID()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - verifyEmptyArray(t, storage, typeInfo, address, copied) - }) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - t.Run("root-dataslab", func(t *testing.T) { + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1) + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - const arraySize = 10 + expectedParentSize += expectedInlinedChildSize - typeInfo := testTypeInfo{42} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - require.Equal(t, 
uint64(arraySize), array.Count()) + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - iter, err := array.Iterator() - require.NoError(t, err) + // Remove elements from child array. + elementCount := children[0].array.Count() - // Create a new array with new storage, new address, and original array's elements. - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + for i := uint64(0); i < elementCount-1; i++ { + for j, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), array.SlabID()) + existingStorable, err := childArray.Remove(childArray.Count() - 1) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + expectedChildValues = expectedChildValues[:len(expectedChildValues)-1] - const arraySize = 4096 + expectedValues[j] = expectedChildValues - typeInfo := testTypeInfo{42} + require.True(t, gchildArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) - } + 
require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged - require.Equal(t, uint64(arraySize), array.Count()) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - iter, err := array.Iterator() - require.NoError(t, err) + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1) + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + // Test parent slab size + expectedParentSize -= vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } - verifyArray(t, storage, typeInfo, address, copied, values, false) + for _, child := range children { + require.Equal(t, uint64(1), child.child.array.Count()) + require.Equal(t, uint64(1), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - t.Run("rebalance two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + t.Run("parent is root metadata slab, with four child arrays, each child array has grand child arrays", 
func(t *testing.T) { + const arraySize = 4 typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - var values []Value - var v Value + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) - values = append(values, v) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - err = array.Insert(0, v) - require.NoError(t, err) + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - for i := 0; i < 35; i++ { - v = Uint64Value(i) - values = append(values, v) + // Append grand child array to child array + err = child.Append(gchild) + require.NoError(t, err) - err = array.Append(v) + // Append child array to parent + err = parentArray.Append(child) require.NoError(t, err) + + expectedValues[i] = arrayValue{arrayValue{}} } - require.Equal(t, uint64(36), array.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
- iter, err := array.Iterator() - require.NoError(t, err) + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + // Test parent array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + type arrayInfo struct { + array *Array + valueID ValueID + child *arrayInfo + } - t.Run("merge two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + children := make([]arrayInfo, arraySize) - typeInfo := testTypeInfo{42} + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - var values []Value - var v Value - for i := 0; i < 35; i++ { - v = Uint64Value(i) - values = append(values, v) - err = array.Append(v) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + e, err = childArray.Get(0) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, 
gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + children[i] = arrayInfo{ + array: childArray, + valueID: valueID, + child: &arrayInfo{array: gchildArray, valueID: gValueID}, + } } - v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) - values = append(values, nil) - copy(values[25+1:], values[25:]) - values[25] = v + // Appending 6 elements to grand child array so that parent array root slab is metadata slab. + for i := uint32(0); i < 6; i++ { + for j, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - err = array.Insert(25, v) - require.NoError(t, err) + err := gchildArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), gchildArray.Count()) - require.Equal(t, uint64(36), array.Count()) + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) - iter, err := array.Iterator() - require.NoError(t, err) + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + expectedGChildValues = append(expectedGChildValues, v) + + expectedChildValues[0] = expectedGChildValues + expectedValues[j] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // 
Value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize*(i+1) + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + // Add one more element to grand child array which triggers parent array slab becomes metadata slab (all elements are still inlined). 
+ for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID + + err = gchildArray.Append(v) + require.NoError(t, err) + + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + expectedGChildValues = append(expectedGChildValues, v) - t.Run("random", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + expectedChildValues[0] = expectedGChildValues + expectedValues[i] = expectedChildValues - const arraySize = 4096 + require.True(t, gchildArray.Inlined()) + require.True(t, childArray.Inlined()) + require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slab because parent root slab is metdata. - r := newRand(t) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - typeInfo := testTypeInfo{42} + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := randomValue(r, int(maxInlineArrayElementSize)) - values[i] = v + expectedInlinedChildSlabSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSlabSize, childArray.root.ByteSize()) - err := array.Append(v) - require.NoError(t, err) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slab because child array is no longer inlined. 
+ require.False(t, parentArray.root.IsData()) - iter, err := array.Iterator() - require.NoError(t, err) + // Add one more element to grand child array which triggers + // - child arrays become standalone slab (grand child arrays are still inlined) + // - parent array slab becomes data slab + for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - storage := newTestPersistentStorage(t) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + for j := 0; j < 2; j++ { + err = gchildArray.Append(v) + require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + expectedGChildValues = append(expectedGChildValues, v) + } - t.Run("data slab too large", func(t *testing.T) { - // Slab size must not exceed maxThreshold. - // We cannot make this problem happen after Atree Issue #193 - // was fixed by PR #194 & PR #197. This test is to catch regressions. + expectedChildValues[0] = expectedGChildValues + expectedValues[i] = expectedChildValues - SetThreshold(256) - defer SetThreshold(1024) + require.True(t, gchildArray.Inlined()) + require.False(t, childArray.Inlined()) - r := newRand(t) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - typeInfo := testTypeInfo{42} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - var values []Value - var v Value + // Test standalone grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + expectedStandaloneChildSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedStandaloneChildSlabSize, childArray.root.ByteSize()) - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - iter, err := array.Iterator() - require.NoError(t, err) + // Parent array has one root data slab, 4 grand child array with standalone root data slab. 
+ require.Equal(t, 1+arraySize, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + // Remove elements from grand child array to trigger child array inlined again. + for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + expectedChildValues, ok := expectedValues[i].(arrayValue) + require.True(t, ok) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) -} + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) -func TestArrayNestedStorables(t *testing.T) { + for j := 0; j < 2; j++ { + _, err = gchildArray.Remove(0) + require.NoError(t, err) - t.Parallel() + expectedGChildValues = expectedGChildValues[:len(expectedGChildValues)-1] + } - typeInfo := testTypeInfo{42} + expectedChildValues[0] = expectedGChildValues + expectedValues[i] = expectedChildValues - const arraySize = 1024 * 4 + require.True(t, gchildArray.Inlined()) + require.True(t, childArray.Inlined()) - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := strings.Repeat("a", int(i)) - v := SomeValue{Value: NewStringValue(s)} - values[i] = v + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - err := array.Append(v) - require.NoError(t, err) - } + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - verifyArray(t, storage, typeInfo, address, array, values, true) -} + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) -func TestArrayMaxInlineElement(t *testing.T) { - t.Parallel() + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - r := newRand(t) + // Parent array has 1 metadata slab, and two data slab, all child and grand child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Remove elements from grand child array. 
+ elementCount := children[0].child.array.Count() - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + for i := uint64(0); i < elementCount; i++ { + for j, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - var values []Value - for i := 0; i < 2; i++ { - // String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead. - v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3))) - values = append(values, v) + existingStorable, err := gchildArray.Remove(0) + require.NoError(t, err) + require.Equal(t, v, existingStorable) - err = array.Append(v) - require.NoError(t, err) - } + expectedChildValues, ok := expectedValues[j].(arrayValue) + require.True(t, ok) - require.True(t, array.root.IsData()) + expectedGChildValues, ok := expectedChildValues[0].(arrayValue) + require.True(t, ok) - // Size of root data slab with two elements of max inlined size is target slab size minus - // slab id size (next slab id is omitted in root slab), and minus 1 byte - // (for rounding when computing max inline array element size). 
- require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size)) + expectedGChildValues = expectedGChildValues[1:] - verifyArray(t, storage, typeInfo, address, array, values, false) -} + expectedChildValues[0] = expectedGChildValues + expectedValues[j] = expectedChildValues -func TestArrayString(t *testing.T) { + require.True(t, gchildArray.Inlined()) + require.True(t, gchildArray.Inlined()) - SetThreshold(256) - defer SetThreshold(1024) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged - t.Run("small", func(t *testing.T) { - const arraySize = 6 + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(gchildArray.mutableElementIndex)) <= gchildArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } } - want := `[0 1 2 3 4 5]` - require.Equal(t, want, array.String()) + for _, child := range children { + 
require.Equal(t, uint64(0), child.child.array.Count()) + require.Equal(t, uint64(1), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, 1, getStoredDeltas(storage)) + + expectedParentSize = uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize*2 + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) }) +} - t.Run("large", func(t *testing.T) { - const arraySize = 120 +func TestChildArrayWhenParentArrayIsModified(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + const arraySize = 2 - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) - } + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]` - require.Equal(t, want, array.String()) - }) -} + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
-func TestArraySlabDump(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + // Test parent slab size with empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - t.Run("small", func(t *testing.T) { - const arraySize = 6 + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - array, err := NewArray(storage, address, typeInfo) + children := make([]*struct { + array *Array + valueID ValueID + parentIndex int + }, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + children[i] = &struct { + array *Array + valueID ValueID + parentIndex int + }{ + childArray, valueID, i, } + } - want := []string{ - "level 1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]", - } - dumps, err := DumpArraySlabs(array) + t.Run("insert elements in parent array", func(t *testing.T) { + // insert value at index 0, so all child array indexes are moved by +1 + v := Uint64Value(0) + err := parentArray.Insert(0, v) require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("large", func(t *testing.T) { - const 
arraySize = 120 + expectedValues = append(expectedValues, nil) + copy(expectedValues[1:], expectedValues) + expectedValues[0] = v - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + for i, child := range children { + childArray := child.array + childValueID := child.valueID - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + v := Uint64Value(i) + vSize := v.ByteSize() - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) + err := childArray.Append(v) require.NoError(t, err) - } + require.Equal(t, uint64(1), childArray.Count()) - want := []string{ - "level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]", - "level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]", - "level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]", - } + child.parentIndex = i + 1 - dumps, err := DumpArraySlabs(array) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) - t.Run("overflow", func(t *testing.T) { + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is 
undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize)))) - require.NoError(t, err) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - want := []string{ - "level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", - "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - dumps, err := DumpArraySlabs(array) + // insert value at index 2, so only second child array index is moved by +1 + v = Uint64Value(2) + err = parentArray.Insert(2, v) require.NoError(t, err) - require.Equal(t, want, dumps) - }) -} -func errorCategorizationCount(err error) int { - var fatalError *FatalError - var userError *UserError - var externalError *ExternalError + expectedValues = append(expectedValues, nil) + copy(expectedValues[3:], expectedValues[2:]) + expectedValues[2] = v - count := 0 - if errors.As(err, &fatalError) { - count++ - } - if errors.As(err, &userError) { - count++ - } - if errors.As(err, &externalError) { - count++ - } - return count -} + for i, child := range children { + childArray := child.array + childValueID := child.valueID -func TestArrayLoadedValueIterator(t *testing.T) { + v := Uint64Value(i) + vSize := v.ByteSize() - SetThreshold(256) - defer 
SetThreshold(1024) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(2), childArray.Count()) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + if i > 0 { + child.parentIndex++ + } + + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) + + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - t.Run("empty", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - array, err := NewArray(storage, address, typeInfo) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // insert value at index 4, so none of child array indexes are affected. 
+ v = Uint64Value(4) + err = parentArray.Insert(4, v) require.NoError(t, err) - // parent array: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + expectedValues = append(expectedValues, nil) + expectedValues[4] = v - verifyArrayLoadedElements(t, array, nil) - }) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - t.Run("root data slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + v := Uint64Value(i) + vSize := v.ByteSize() - const arraySize = 3 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(3), childArray.Count()) - // parent array: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) - verifyArrayLoadedElements(t, array, values) - }) + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues - t.Run("root data slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 
0, getArrayMetaDataSlabCount(storage)) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArrayLoadedElements(t, array, values) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } }) - t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + t.Run("remove elements from parent array", func(t *testing.T) { + // remove value at index 0, so all child array indexes are moved by -1. + existingStorable, err := parentArray.Remove(0) + require.NoError(t, err) + require.Equal(t, Uint64Value(0), existingStorable) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + copy(expectedValues, expectedValues[1:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - verifyArrayLoadedElements(t, array, values) + v := Uint64Value(i) + vSize := v.ByteSize() - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(4), childArray.Count()) + + child.parentIndex-- - nestedArray, ok := v.(*Array) + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = 
expectedChildValues - expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - }) - t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Remove value at index 1, so only second child array index is moved by -1 + existingStorable, err = parentArray.Remove(1) + require.NoError(t, err) + require.Equal(t, Uint64Value(2), existingStorable) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + copy(expectedValues[1:], expectedValues[2:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - verifyArrayLoadedElements(t, array, values) + v := Uint64Value(i) + vSize := v.ByteSize() - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + err := childArray.Append(v) + require.NoError(t, err) 
+ require.Equal(t, uint64(5), childArray.Count()) + + if i > 0 { + child.parentIndex-- + } - nestedArray, ok := v.(*Array) + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues - expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - }) - t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Remove value at index 2 (last element), so none of child array indexes are affected. 
+ existingStorable, err = parentArray.Remove(2) + require.NoError(t, err) + require.Equal(t, Uint64Value(4), existingStorable) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - verifyArrayLoadedElements(t, array, values) + v := Uint64Value(i) + vSize := v.ByteSize() - // Unload composite element in the middle - unloadValueIndex := 1 + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(6), childArray.Count()) - v := values[unloadValueIndex] + expectedChildValues, ok := expectedValues[child.parentIndex].(arrayValue) + require.True(t, ok) - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedChildValues = append(expectedChildValues, v) + expectedValues[child.parentIndex] = expectedChildValues - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - verifyArrayLoadedElements(t, array, values) + // Test array's mutableElementIndex + require.True(t, uint64(len(childArray.mutableElementIndex)) <= childArray.Count()) + require.True(t, 
uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } }) +} - t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func createArrayWithEmptyChildArray( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) + // Append child array to parent + err = array.Append(child) + require.NoError(t, err) - i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { - // At this point, iterator returned first element (v). + expectedValues[i] = arrayValue{} + } - // Remove all other nested composite elements (except first element) from storage. 
- for _, value := range values[1:] { - nestedArray, ok := value.(*Array) - require.True(t, ok) + return array, expectedValues +} - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } +func createArrayWithEmpty2LevelChildArray( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { - require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0], v) - i++ - return true, nil - }) + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. - }) - - t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) { - const arraySize = 3 - // Create an array with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + // Append grand child array to child array + err = child.Append(gchild) + require.NoError(t, err) - // parent array: 1 root data slab - // nested composite element: 1 root data slab - require.Equal(t, 2, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + // Append child array to parent + err = array.Append(child) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) + expectedValues[i] = arrayValue{arrayValue{}} + } - // Unload composite element - v := values[nestedCompositeIndex].(*Array) + return array, 
expectedValues +} - err := storage.Remove(v.SlabID()) - require.NoError(t, err) +func getStoredDeltas(storage *PersistentSlabStorage) int { + count := 0 + for _, slab := range storage.deltas { + if slab != nil { + count++ + } + } + return count +} - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] +func TestArraySetReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - verifyArrayLoadedElements(t, array, values) - } - }) + t.Run("child array is not inlined", func(t *testing.T) { + const arraySize = 2 - t.Run("root metadata slab with simple values", func(t *testing.T) { storage := newTestPersistentStorage(t) - const arraySize = 20 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - require.Equal(t, 3, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) - }) + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - t.Run("root metadata slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + err = parentArray.Append(childArray) + require.NoError(t, err) - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + err = childArray.Append(v) + require.NoError(t, err) - 
verifyArrayLoadedElements(t, array, values) - }) + expectedChildValues = append(expectedChildValues, v) - t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + if !childArray.Inlined() { + break + } + } - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + expectedValues = append(expectedValues, expectedChildValues) + } - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArrayLoadedElements(t, array, values) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + // Overwrite existing child array value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - nestedArray, ok := v.(*Array) + id, ok := existingStorable.(SlabIDStorable) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + child, err := id.StoredValue(storage) require.NoError(t, err) - expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) + valueEqual(t, expectedValues[i], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + + expectedValues[i] = Uint64Value(0) + + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) } + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) - t.Run("root metadata slab with composite values, unload composite 
element from back to front", func(t *testing.T) { + t.Run("child array is inlined", func(t *testing.T) { + const arraySize = 2 + storage := newTestPersistentStorage(t) - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + err = parentArray.Append(childArray) + require.NoError(t, err) + + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) + + expectedValues = append(expectedValues, arrayValue{v}) + } + + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Overwrite existing child array value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - nestedArray, ok := v.(*Array) + id, ok := existingStorable.(SlabIDStorable) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + child, err := id.StoredValue(storage) require.NoError(t, err) - expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) + valueEqual(t, 
expectedValues[i], child) + + expectedValues[i] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } + + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) - t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { + t.Run("child map is not inlined", func(t *testing.T) { + const arraySize = 2 + storage := newTestPersistentStorage(t) - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Unload composite element in the middle - for _, index := range []int{4, 14} { + err = parentArray.Append(childMap) + require.NoError(t, err) + + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) + + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - v := values[index] + expectedChildValues[k] = v + + if !childMap.Inlined() { + break + } + } + } + + // Test array's mutableElementIndex + require.True(t, 
uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Overwrite existing child map value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - nestedArray, ok := v.(*Array) + id, ok := existingStorable.(SlabIDStorable) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + child, err := id.StoredValue(storage) require.NoError(t, err) - copy(values[index:], values[index+1:]) - values = values[:len(values)-1] + valueEqual(t, expectedValues[i], child) - verifyArrayLoadedElements(t, array, values) + expectedValues[i] = Uint64Value(0) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } + + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) - t.Run("root metadata slab with simple and composite values, unload composite element", func(t *testing.T) { - const arraySize = 20 + t.Run("child map is inlined", func(t *testing.T) { + const arraySize = 2 - // Create an array with composite value at specified index. 
- for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + storage := newTestPersistentStorage(t) - array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+1, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Unload composite value - v := values[nestedCompositeIndex].(*Array) + k := Uint64Value(i) - err := storage.Remove(v.SlabID()) + err = parentArray.Append(childMap) require.NoError(t, err) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) - verifyArrayLoadedElements(t, array, values) + // Insert into child map until child map is not inlined + v := NewStringValue(strings.Repeat("a", 10)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v } - }) - t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + testArray(t, storage, typeInfo, 
address, parentArray, expectedValues, true) - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Overwrite existing child map value + for i := 0; i < arraySize; i++ { + existingStorable, err := parentArray.Set(uint64(i), Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - verifyArrayLoadedElements(t, array, values) + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + child, err := id.StoredValue(storage) + require.NoError(t, err) - // Unload data slabs from front to back - for i := 0; i < len(metaDataSlab.childrenHeaders); i++ { + valueEqual(t, expectedValues[i], child) - childHeader := metaDataSlab.childrenHeaders[i] + expectedValues[i] = Uint64Value(0) - err := storage.Remove(childHeader.slabID) + err = storage.Remove(SlabID(id)) require.NoError(t, err) + } - values = values[childHeader.count:] + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - verifyArrayLoadedElements(t, array, values) - } + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) }) +} + +func TestArrayRemoveReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("child array is not inlined", func(t *testing.T) { + const arraySize = 2 - t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { storage := newTestPersistentStorage(t) - const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, 
getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + err = parentArray.Append(childArray) + require.NoError(t, err) - // Unload data slabs from back to front - for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) - childHeader := metaDataSlab.childrenHeaders[i] + err = childArray.Append(v) + require.NoError(t, err) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + expectedChildValues = append(expectedChildValues, v) - values = values[:len(values)-int(childHeader.count)] + if !childArray.Inlined() { + break + } + } - verifyArrayLoadedElements(t, array, values) + expectedValues = append(expectedValues, expectedChildValues) } - }) - - t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) - const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - verifyArrayLoadedElements(t, array, values) + // Remove child array value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - require.True(t, 
len(metaDataSlab.childrenHeaders) > 2) + child, err := id.StoredValue(storage) + require.NoError(t, err) - index := 1 - childHeader := metaDataSlab.childrenHeaders[index] + valueEqual(t, expectedValues[i], child) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } - copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:]) - values = values[:array.Count()-uint64(childHeader.count)] + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) - verifyArrayLoadedElements(t, array, values) + testEmptyArray(t, storage, typeInfo, address, parentArray) }) - t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { + t.Run("child array is inlined", func(t *testing.T) { + const arraySize = 2 + storage := newTestPersistentStorage(t) - const arraySize = 250 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs - require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + var expectedValues arrayValue - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + for i := 0; i < arraySize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // Unload non-root metadata slabs from front to back - for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + err = parentArray.Append(childArray) + require.NoError(t, err) - childHeader := rootMetaDataSlab.childrenHeaders[i] + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) - err := storage.Remove(childHeader.slabID) + err = childArray.Append(v) require.NoError(t, err) + 
require.True(t, childArray.Inlined()) - values = values[childHeader.count:] - - verifyArrayLoadedElements(t, array, values) + expectedValues = append(expectedValues, arrayValue{v}) } - }) - t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - const arraySize = 250 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs - require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + // Remove child array value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - // Unload non-root metadata slabs from back to front - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + child, err := id.StoredValue(storage) + require.NoError(t, err) - childHeader := rootMetaDataSlab.childrenHeaders[i] + valueEqual(t, expectedValues[i], child) - err := storage.Remove(childHeader.slabID) + err = storage.Remove(SlabID(id)) require.NoError(t, err) + } - values = values[childHeader.count:] + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) - verifyArrayLoadedElements(t, array, values) - } + testEmptyArray(t, storage, typeInfo, address, parentArray) }) - t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) { + t.Run("child map is not inlined", func(t *testing.T) { + const arraySize = 2 storage := newTestPersistentStorage(t) - const arraySize = 500 - array, values := 
createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - r := newRand(t) + err = parentArray.Append(childMap) + require.NoError(t, err) - // Unload random composite element - for len(values) > 0 { + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) - i := r.Intn(len(values)) + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ - v := values[i] + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + + if !childMap.Inlined() { + break + } + } + } + + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - nestedArray, ok := v.(*Array) + // Remove child map value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + child, err := id.StoredValue(storage) require.NoError(t, err) - copy(values[i:], values[i+1:]) - values = values[:len(values)-1] + valueEqual(t, expectedValues[i], 
child) - verifyArrayLoadedElements(t, array, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } + + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + + testEmptyArray(t, storage, typeInfo, address, parentArray) }) - t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + t.Run("child map is inlined", func(t *testing.T) { + const arraySize = 2 storage := newTestPersistentStorage(t) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + for i := 0; i < arraySize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + k := Uint64Value(i) - type slabInfo struct { - id SlabID - startIndex int - count int - } + err = parentArray.Append(childMap) + require.NoError(t, err) - count := 0 - var dataSlabInfos []*slabInfo - for _, mheader := range rootMetaDataSlab.childrenHeaders { - nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) - require.True(t, ok) + expectedChildValues := make(mapValue) + expectedValues = append(expectedValues, expectedChildValues) - for _, h := range nonrootMetaDataSlab.childrenHeaders { - dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)} - dataSlabInfos = append(dataSlabInfos, dataSlabInfo) - count += int(h.count) - } - } + 
// Insert into child map until child map is not inlined + v := NewStringValue(strings.Repeat("a", 10)) - r := newRand(t) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Unload random data slab. - for len(dataSlabInfos) > 0 { - indexToUnload := r.Intn(len(dataSlabInfos)) + expectedChildValues[k] = v + } - slabInfoToUnload := dataSlabInfos[indexToUnload] + // Test array's mutableElementIndex + require.True(t, uint64(len(parentArray.mutableElementIndex)) <= parentArray.Count()) - // Update startIndex for all data slabs after indexToUnload. - for i := indexToUnload + 1; i < len(dataSlabInfos); i++ { - dataSlabInfos[i].startIndex -= slabInfoToUnload.count - } + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // Remove slabInfo to be unloaded from dataSlabInfos. - copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:]) - dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + // Remove child map value + for i := 0; i < arraySize; i++ { + valueStorable, err := parentArray.Remove(uint64(0)) + require.NoError(t, err) - err := storage.Remove(slabInfoToUnload.id) + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) require.NoError(t, err) - copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) - values = values[:len(values)-slabInfoToUnload.count] + valueEqual(t, expectedValues[i], child) - verifyArrayLoadedElements(t, array, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } - require.Equal(t, 0, len(values)) + // Test array's mutableElementIndex + require.Equal(t, 0, len(parentArray.mutableElementIndex)) + + testEmptyArray(t, storage, typeInfo, address, parentArray) }) +} - t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { +func TestArrayWithOutdatedCallback(t *testing.T) { + 
typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("overwritten child array", func(t *testing.T) { storage := newTestPersistentStorage(t) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + var expectedValues arrayValue - verifyArrayLoadedElements(t, array, values) + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - type slabInfo struct { - id SlabID - startIndex int - count int - children []*slabInfo - } + // Insert child array to parent array + err = parentArray.Append(childArray) + require.NoError(t, err) - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + v := NewStringValue(strings.Repeat("a", 10)) + + err = childArray.Append(v) + require.NoError(t, err) + + expectedValues = append(expectedValues, arrayValue{v}) + + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Overwrite child array value from parent + valueStorable, err := parentArray.Set(0, Uint64Value(0)) + require.NoError(t, err) + + id, ok := valueStorable.(SlabIDStorable) require.True(t, ok) - var dataSlabCount, metadataSlabCount int - nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) - for i, mheader := range rootMetaDataSlab.childrenHeaders { + child, err := id.StoredValue(storage) + require.NoError(t, err) - nonrootMetadataSlabInfo := &slabInfo{ - id: mheader.slabID, - startIndex: metadataSlabCount, - count: int(mheader.count), - } - metadataSlabCount += int(mheader.count) + valueEqual(t, 
expectedValues[0], child) - nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) - require.True(t, ok) + expectedValues[0] = Uint64Value(0) + + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) + + // modify overwritten child array + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) + + // childArray.parentUpdater is nil after callback is invoked. + require.Nil(t, childArray.parentUpdater) + + // No-op on parent + valueEqual(t, expectedValues, parentArray) + }) - children := make([]*slabInfo, len(nonrootMetadataSlab.childrenHeaders)) - for i, h := range nonrootMetadataSlab.childrenHeaders { - children[i] = &slabInfo{ - id: h.slabID, - startIndex: dataSlabCount, - count: int(h.count), - } - dataSlabCount += int(h.count) - } + t.Run("removed child array", func(t *testing.T) { - nonrootMetadataSlabInfo.children = children - nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo - } + storage := newTestPersistentStorage(t) - r := newRand(t) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - const ( - metadataSlabType int = iota - dataSlabType - maxSlabType - ) + var expectedValues arrayValue - for len(nonrootMetadataSlabInfos) > 0 { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - var slabInfoToBeRemoved *slabInfo - var isLastSlab bool + // Insert child array to parent array + err = parentArray.Append(childArray) + require.NoError(t, err) - // Unload random metadata or data slab. - switch r.Intn(maxSlabType) { + v := NewStringValue(strings.Repeat("a", 10)) - case metadataSlabType: - // Unload metadata slab at random index. 
- metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + err = childArray.Append(v) + require.NoError(t, err) - isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1 + expectedValues = append(expectedValues, arrayValue{v}) - slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex] + testArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - count := slabInfoToBeRemoved.count + // Remove child array value from parent + valueStorable, err := parentArray.Remove(0) + require.NoError(t, err) - // Update startIndex for subsequence metadata and data slabs. - for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { - nonrootMetadataSlabInfos[i].startIndex -= count + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { - nonrootMetadataSlabInfos[i].children[j].startIndex -= count - } - } + child, err := id.StoredValue(storage) + require.NoError(t, err) - copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) - nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + valueEqual(t, expectedValues[0], child) - case dataSlabType: - // Unload data slab at randome index. - metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + expectedValues = arrayValue{} - metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex] + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) - dataSlabIndex := r.Intn(len(metaSlabInfo.children)) + // modify removed child array + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) - slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex] + // childArray.parentUpdater is nil after callback is invoked. 
+ require.Nil(t, childArray.parentUpdater) - isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) && - (dataSlabIndex == len(metaSlabInfo.children)-1) + // No-op on parent + valueEqual(t, expectedValues, parentArray) + }) +} - count := slabInfoToBeRemoved.count +func TestArraySetType(t *testing.T) { + typeInfo := testTypeInfo{42} + newTypeInfo := testTypeInfo{43} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Update startIndex for subsequence data slabs. - for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ { - metaSlabInfo.children[i].startIndex -= count - } + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) - copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:]) - metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1] + // Create a new array in memory + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), array.Count()) + require.Equal(t, typeInfo, array.Type()) + require.True(t, array.root.IsData()) - metaSlabInfo.count -= count + // Modify type info of new array + err = array.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, array.Type()) - // Update startIndex for all subsequence metadata slabs. 
- for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { - nonrootMetadataSlabInfos[i].startIndex -= count + // Commit new array to storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) - for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { - nonrootMetadataSlabInfos[i].children[j].startIndex -= count - } - } + testExistingArraySetType(t, array.SlabID(), storage.baseStorage, newTypeInfo, array.Count()) + }) - if len(metaSlabInfo.children) == 0 { - copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) - nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] - } - } + t.Run("data slab root", func(t *testing.T) { + storage := newTestPersistentStorage(t) - err := storage.Remove(slabInfoToBeRemoved.id) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + arraySize := 10 + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + err := array.Append(v) require.NoError(t, err) + } - if isLastSlab { - values = values[:slabInfoToBeRemoved.startIndex] - } else { - copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) - values = values[:len(values)-slabInfoToBeRemoved.count] - } + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, typeInfo, array.Type()) + require.True(t, array.root.IsData()) - verifyArrayLoadedElements(t, array, values) - } + err = array.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, array.Type()) - require.Equal(t, 0, len(values)) + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingArraySetType(t, array.SlabID(), storage.baseStorage, newTypeInfo, array.Count()) }) -} -func createArrayWithSimpleValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, -) (*Array, []Value) { + 
t.Run("metadata slab root", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Create parent array - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - values := make([]Value, arraySize) - r := rune('a') - for i := 0; i < arraySize; i++ { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) + arraySize := 10_000 + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) + err := array.Append(v) + require.NoError(t, err) + } - err := array.Append(values[i]) + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, typeInfo, array.Type()) + require.False(t, array.root.IsData()) + + err = array.SetType(newTypeInfo) require.NoError(t, err) - } + require.Equal(t, newTypeInfo, array.Type()) - return array, values -} + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) -func createArrayWithCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, -) (*Array, []Value) { + testExistingArraySetType(t, array.SlabID(), storage.baseStorage, newTypeInfo, array.Count()) + }) - // Create parent array - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + t.Run("inlined in parent container root data slab", func(t *testing.T) { + storage := newTestPersistentStorage(t) - expectedValues := make([]Value, arraySize) - for i := 0; i < arraySize; i++ { - // Create nested array - nested, err := NewArray(storage, address, typeInfo) + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - err = nested.Append(Uint64Value(i)) + err = parentArray.Append(childArray) require.NoError(t, err) - expectedValues[i] = nested + require.Equal(t, uint64(1), parentArray.Count()) + require.Equal(t, typeInfo, 
parentArray.Type()) + require.True(t, parentArray.root.IsData()) + require.False(t, parentArray.Inlined()) - // Append nested array to parent - err = array.Append(nested) + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, typeInfo, childArray.Type()) + require.True(t, childArray.root.IsData()) + require.True(t, childArray.Inlined()) + + err = childArray.SetType(newTypeInfo) require.NoError(t, err) - } + require.Equal(t, newTypeInfo, childArray.Type()) - return array, expectedValues -} + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) -func createArrayWithSimpleAndCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, - compositeValueIndex int, -) (*Array, []Value) { - require.True(t, compositeValueIndex < arraySize) + testExistingInlinedArraySetType(t, parentArray.SlabID(), 0, storage.baseStorage, newTypeInfo, childArray.Count()) + }) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + t.Run("inlined in parent container non-root data slab", func(t *testing.T) { + storage := newTestPersistentStorage(t) - values := make([]Value, arraySize) - r := 'a' - for i := 0; i < arraySize; i++ { + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) + arraySize := 10_000 + for i := 0; i < arraySize-1; i++ { + v := Uint64Value(i) + err := parentArray.Append(v) require.NoError(t, err) + } - err = a.Append(Uint64Value(i)) - require.NoError(t, err) + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - values[i] = a - } else { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) - r++ - } + err = parentArray.Append(childArray) + require.NoError(t, err) + + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, 
typeInfo, parentArray.Type()) + require.False(t, parentArray.root.IsData()) + require.False(t, parentArray.Inlined()) + + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, typeInfo, childArray.Type()) + require.True(t, childArray.root.IsData()) + require.True(t, childArray.Inlined()) - err = array.Append(values[i]) + err = childArray.SetType(newTypeInfo) require.NoError(t, err) - } + require.Equal(t, newTypeInfo, childArray.Type()) - return array, values -} + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) -func verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { - i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { - require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i], v) - i++ - return true, nil + testExistingInlinedArraySetType(t, parentArray.SlabID(), arraySize-1, storage.baseStorage, newTypeInfo, childArray.Count()) }) - require.NoError(t, err) - require.Equal(t, len(expectedValues), i) } -func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { - var counter int - for _, slab := range storage.deltas { - if _, ok := slab.(*ArrayMetaDataSlab); ok { - counter++ - } - } - return counter -} +func testExistingArraySetType( + t *testing.T, + id SlabID, + baseStorage BaseStorage, + expectedTypeInfo testTypeInfo, + expectedCount uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} -func TestArrayID(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) - array, err := NewArray(storage, address, typeInfo) + // Load existing array by ID + array, err := NewArrayWithRootID(storage, id) require.NoError(t, err) + require.Equal(t, expectedCount, array.Count()) + require.Equal(t, 
expectedTypeInfo, array.Type()) - sid := array.SlabID() - id := array.ValueID() + // Modify type info of existing array + err = array.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, array.Count()) + require.Equal(t, newTypeInfo, array.Type()) - require.Equal(t, sid.address[:], id[:8]) - require.Equal(t, sid.index[:], id[8:]) + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Load existing array again from storage + array2, err := NewArrayWithRootID(storage2, id) + require.NoError(t, err) + require.Equal(t, expectedCount, array2.Count()) + require.Equal(t, newTypeInfo, array2.Type()) } -func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { - const ( - arraySize = 3 - initialStorableSize = 1 - mutatedStorableSize = 5 - ) +func testExistingInlinedArraySetType( + t *testing.T, + parentID SlabID, + inlinedChildIndex int, + baseStorage BaseStorage, + expectedTypeInfo testTypeInfo, + expectedCount uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) - array, err := NewArray(storage, address, typeInfo) + // Load existing array by ID + parentArray, err := NewArrayWithRootID(storage, parentID) require.NoError(t, err) - values := make([]*mutableValue, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := newMutableValue(initialStorableSize) - values[i] = v + element, err := parentArray.Get(uint64(inlinedChildIndex)) + require.NoError(t, err) - err := array.Append(v) - require.NoError(t, err) - } + childArray, ok := element.(*Array) + require.True(t, ok) - require.True(t, array.root.IsData()) + require.Equal(t, 
expectedCount, childArray.Count()) + require.Equal(t, expectedTypeInfo, childArray.Type()) - expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + // Modify type info of existing array + err = childArray.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, childArray.Count()) + require.Equal(t, newTypeInfo, childArray.Type()) - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) require.NoError(t, err) - for i := uint64(0); i < arraySize; i++ { - mv := values[i] - mv.updateStorableSize(mutatedStorableSize) + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) - existingStorable, err := array.Set(i, mv) - require.NoError(t, err) - require.NotNil(t, existingStorable) - } + // Load existing array again from storage + parentArray2, err := NewArrayWithRootID(storage2, parentID) + require.NoError(t, err) - require.True(t, array.root.IsData()) + element2, err := parentArray2.Get(uint64(inlinedChildIndex)) + require.NoError(t, err) - expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + childArray2, ok := element2.(*Array) + require.True(t, ok) - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) + require.Equal(t, expectedCount, childArray2.Count()) + require.Equal(t, newTypeInfo, childArray2.Type()) } diff --git a/basicarray.go b/basicarray.go index 6bb9d9f..1c7c399 100644 --- a/basicarray.go +++ b/basicarray.go @@ -76,7 +76,7 @@ func newBasicArrayDataSlabFromData( ) } - cborDec := decMode.NewByteStreamDecoder(data[2:]) + cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) elemCount, err 
:= cborDec.DecodeArrayHead() if err != nil { @@ -85,7 +85,7 @@ func newBasicArrayDataSlabFromData( elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") @@ -101,10 +101,17 @@ func newBasicArrayDataSlabFromData( func (a *BasicArrayDataSlab) Encode(enc *Encoder) error { - flag := maskBasicArray | maskSlabRoot + const version = 1 + + h, err := newArraySlabHead(version, slabBasicArray) + if err != nil { + return NewEncodingError(err) + } + + h.setRoot() // Encode flag - _, err := enc.Write([]byte{0x0, flag}) + _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) } @@ -160,13 +167,7 @@ func (a *BasicArrayDataSlab) Set(storage SlabStorage, index uint64, v Storable) oldElem.ByteSize() + v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) - } - - return nil + return storeSlab(storage, a) } func (a *BasicArrayDataSlab) Insert(storage SlabStorage, index uint64, v Storable) error { @@ -185,13 +186,7 @@ func (a *BasicArrayDataSlab) Insert(storage SlabStorage, index uint64, v Storabl a.header.count++ a.header.size += v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) - } - - return nil + return storeSlab(storage, a) } func (a *BasicArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable, error) { @@ -214,10 +209,9 @@ func (a *BasicArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable a.header.count-- a.header.size -= v.ByteSize() - err := storage.Store(a.header.slabID, a) + err := storeSlab(storage, a) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + return nil, err } return v, nil diff --git a/cmd/main/main.go b/cmd/main/main.go deleted file mode 100644 index 3f3aa1c..0000000 --- a/cmd/main/main.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Atree - Scalable Arrays and Ordered Maps - * - * Copyright Flow Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "flag" - "fmt" - - "github.com/onflow/atree" - - "github.com/fxamacker/cbor/v2" -) - -const cborTagUInt64Value = 164 - -type Uint64Value uint64 - -var _ atree.Value = Uint64Value(0) -var _ atree.Storable = Uint64Value(0) - -func (v Uint64Value) ChildStorables() []atree.Storable { - return nil -} - -func (v Uint64Value) StoredValue(_ atree.SlabStorage) (atree.Value, error) { - return v, nil -} - -func (v Uint64Value) Storable(_ atree.SlabStorage, _ atree.Address, _ uint64) (atree.Storable, error) { - return v, nil -} - -// Encode encodes UInt64Value as -// -// cbor.Tag{ -// Number: cborTagUInt64Value, -// Content: uint64(v), -// } -func (v Uint64Value) Encode(enc *atree.Encoder) error { - err := enc.CBOR.EncodeRawBytes([]byte{ - // tag number - 0xd8, cborTagUInt64Value, - }) - if err != nil { - return err - } - return enc.CBOR.EncodeUint64(uint64(v)) -} - -// TODO: cache size -func (v Uint64Value) ByteSize() uint32 { - // tag number (2 bytes) + encoded content - return 2 + atree.GetUintCBORSize(uint64(v)) -} - -func (v Uint64Value) String() string { - return fmt.Sprintf("%d", uint64(v)) -} - -type testTypeInfo struct{} - -var _ atree.TypeInfo = testTypeInfo{} - -func (testTypeInfo) Encode(e *cbor.StreamEncoder) error { - return e.EncodeUint8(42) -} - -func (i testTypeInfo) Equal(other atree.TypeInfo) bool { - _, ok := other.(testTypeInfo) - return ok -} - -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) { - tagNumber, err := dec.DecodeTagNumber() - if err != nil { - return nil, err - } - - switch tagNumber { - case atree.CBORTagSlabID: - return atree.DecodeSlabIDStorable(dec) - - case cborTagUInt64Value: - n, err := dec.DecodeUint64() - if err != nil { - return nil, err - } - return Uint64Value(n), nil - - default: - return nil, fmt.Errorf("invalid tag number %d", tagNumber) - } -} - -// TODO: implement different slab size for metadata slab and data slab. 
-func main() { - var slabSize uint64 - var numElements uint64 - var verbose bool - - flag.Uint64Var(&slabSize, "size", 1024, "slab size in bytes") - flag.Uint64Var(&numElements, "count", 500, "number of elements in array") - flag.BoolVar(&verbose, "verbose", false, "verbose output") - - flag.Parse() - - minThreshold, maxThreshold, _, _ := atree.SetThreshold(slabSize) - - fmt.Printf( - "Inserting %d elements (uint64) into array with slab size %d, min size %d, and max size %d ...\n", - numElements, - slabSize, - minThreshold, - maxThreshold, - ) - - encMode, err := cbor.EncOptions{}.EncMode() - if err != nil { - fmt.Println(err) - return - } - - decMode, err := cbor.DecOptions{}.DecMode() - if err != nil { - fmt.Println(err) - return - } - - storage := atree.NewBasicSlabStorage(encMode, decMode, decodeStorable, decodeTypeInfo) - - typeInfo := testTypeInfo{} - - address := atree.Address{1, 2, 3, 4, 5, 6, 7, 8} - - array, err := atree.NewArray(storage, address, typeInfo) - - if err != nil { - fmt.Println(err) - return - } - - for i := uint64(0); i < numElements; i++ { - err := array.Append(Uint64Value(i)) - if err != nil { - fmt.Println(err) - return - } - } - - stats, err := atree.GetArrayStats(array) - if err != nil { - fmt.Println(err) - return - } - - fmt.Printf("%+v\n", stats) - - if verbose { - fmt.Printf("\n\n=========== array layout ===========\n") - atree.PrintArray(array) - } -} - -func decodeTypeInfo(_ *cbor.StreamDecoder) (atree.TypeInfo, error) { - return testTypeInfo{}, nil -} diff --git a/cmd/stress/array.go b/cmd/stress/array.go index 0fb1f38..adbaf63 100644 --- a/cmd/stress/array.go +++ b/cmd/stress/array.go @@ -21,6 +21,7 @@ package main import ( "fmt" "os" + "reflect" "runtime" "sync" "time" @@ -28,11 +29,17 @@ import ( "github.com/onflow/atree" ) +type arrayOpType int + const ( - arrayAppendOp = iota + arrayAppendOp arrayOpType = iota arrayInsertOp arraySetOp arrayRemoveOp + arrayMutateChildContainerAfterGet + arrayMutateChildContainerAfterAppend + 
arrayMutateChildContainerAfterInsert + arrayMutateChildContainerAfterSet maxArrayOp ) @@ -43,10 +50,14 @@ type arrayStatus struct { count uint64 // number of elements in array - appendOps uint64 - insertOps uint64 - setOps uint64 - removeOps uint64 + appendOps uint64 + insertOps uint64 + setOps uint64 + removeOps uint64 + mutateChildContainerAfterGetOps uint64 + mutateChildContainerAfterAppendOps uint64 + mutateChildContainerAfterInsertOps uint64 + mutateChildContainerAfterSetOps uint64 } var _ Status = &arrayStatus{} @@ -64,7 +75,7 @@ func (status *arrayStatus) String() string { var m runtime.MemStats runtime.ReadMemStats(&m) - return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d appends, %d sets, %d inserts, %d removes", + return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d appends, %d sets, %d inserts, %d removes, %d Get mutations, %d Append mutations, %d Insert mutations, %d Set mutations", duration.Truncate(time.Second).String(), m.Alloc/1024/1024, status.count, @@ -72,38 +83,44 @@ func (status *arrayStatus) String() string { status.setOps, status.insertOps, status.removeOps, + status.mutateChildContainerAfterGetOps, + status.mutateChildContainerAfterAppendOps, + status.mutateChildContainerAfterInsertOps, + status.mutateChildContainerAfterSetOps, ) } -func (status *arrayStatus) incAppend() { +func (status *arrayStatus) incOp(op arrayOpType, newTotalCount uint64) { status.lock.Lock() defer status.lock.Unlock() - status.appendOps++ - status.count++ -} + switch op { + case arrayAppendOp: + status.appendOps++ -func (status *arrayStatus) incSet() { - status.lock.Lock() - defer status.lock.Unlock() + case arrayInsertOp: + status.insertOps++ - status.setOps++ -} + case arraySetOp: + status.setOps++ -func (status *arrayStatus) incInsert() { - status.lock.Lock() - defer status.lock.Unlock() + case arrayRemoveOp: + status.removeOps++ - status.insertOps++ - status.count++ -} + case arrayMutateChildContainerAfterGet: + 
status.mutateChildContainerAfterGetOps++ -func (status *arrayStatus) incRemove() { - status.lock.Lock() - defer status.lock.Unlock() + case arrayMutateChildContainerAfterAppend: + status.mutateChildContainerAfterAppendOps++ + + case arrayMutateChildContainerAfterInsert: + status.mutateChildContainerAfterInsertOps++ + + case arrayMutateChildContainerAfterSet: + status.mutateChildContainerAfterSetOps++ + } - status.removeOps++ - status.count-- + status.count = newTotalCount } func (status *arrayStatus) Write() { @@ -113,13 +130,11 @@ func (status *arrayStatus) Write() { func testArray( storage *atree.PersistentSlabStorage, address atree.Address, - typeInfo atree.TypeInfo, - maxLength uint64, status *arrayStatus, - minHeapAllocMiB uint64, - maxHeapAllocMiB uint64, ) { + typeInfo := newArrayTypeInfo() + // Create new array array, err := atree.NewArray(storage, address, typeInfo) if err != nil { @@ -127,12 +142,12 @@ func testArray( return } - // values contains array elements in the same order. It is used to check data loss. - values := make([]atree.Value, 0, maxLength) + // expectedValues contains array elements in the same order. It is used to check data loss. 
+ expectedValues := make(arrayValue, 0, flagMaxLength) reduceHeapAllocs := false - opCount := uint64(0) + opCountForStorageHealthCheck := uint64(0) var m runtime.MemStats @@ -140,10 +155,10 @@ func testArray( runtime.ReadMemStats(&m) allocMiB := m.Alloc / 1024 / 1024 - if !reduceHeapAllocs && allocMiB > maxHeapAllocMiB { + if !reduceHeapAllocs && allocMiB > flagMaxHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, removing elements to reduce allocs...\n", allocMiB) reduceHeapAllocs = true - } else if reduceHeapAllocs && allocMiB < minHeapAllocMiB { + } else if reduceHeapAllocs && allocMiB < flagMinHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, resuming random operation...\n", allocMiB) reduceHeapAllocs = false } @@ -178,245 +193,404 @@ func testArray( fmt.Printf("\nHeapAlloc is %d MiB after cleanup and forced gc\n", allocMiB) // Prevent infinite loop that doesn't do useful work. - if allocMiB > maxHeapAllocMiB { + if allocMiB > flagMaxHeapAllocMiB { // This shouldn't happen unless there's a memory leak. 
fmt.Fprintf( os.Stderr, "Exiting because allocMiB %d > maxMapHeapAlloMiB %d with empty map\n", allocMiB, - maxHeapAllocMiB) + flagMaxHeapAllocMiB) return } } - nextOp := r.Intn(maxArrayOp) + var forceRemove bool + if array.Count() == flagMaxLength || reduceHeapAllocs { + forceRemove = true + } - if array.Count() == maxLength || reduceHeapAllocs { - nextOp = arrayRemoveOp + var prevOp arrayOpType + expectedValues, prevOp, err = modifyArray(expectedValues, array, maxNestedLevels, forceRemove) + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + return } - switch nextOp { + opCountForStorageHealthCheck++ - case arrayAppendOp: - opCount++ + // Update status + status.incOp(prevOp, array.Count()) - nestedLevels := r.Intn(maxNestedLevels) - v, err := randomValue(storage, address, nestedLevels) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err) + // Check array elements against values after every op + err = checkArrayDataLoss(expectedValues, array) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } + + if opCountForStorageHealthCheck >= flagMinOpsForStorageHealthCheck { + opCountForStorageHealthCheck = 0 + + if !checkStorageHealth(storage, array.SlabID()) { return } - copiedValue, err := copyValue(storage, atree.Address{}, v) + // Commit slabs to storage so slabs are encoded and then decoded at next op. + err = storage.FastCommit(runtime.NumCPU()) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", v, err) + fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err) return } - // Append to values - values = append(values, copiedValue) + // Drop cache after commit to force slab decoding at next op. 
+ storage.DropCache() + } + } +} + +func nextArrayOp( + expectedValues arrayValue, + array *atree.Array, + nestedLevels int, + forceRemove bool, +) (arrayOpType, error) { - // Append to array - err = array.Append(v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to append %s: %s", v, err) - return - } + if forceRemove { + if array.Count() == 0 { + return 0, fmt.Errorf("failed to force remove array elements because array has no elements") + } + return arrayRemoveOp, nil + } + + if array.Count() == 0 { + return arrayAppendOp, nil + } - // Update status - status.incAppend() + for { + nextOp := arrayOpType(r.Intn(int(maxArrayOp))) - case arraySetOp: - opCount++ + switch nextOp { + case arrayMutateChildContainerAfterAppend, + arrayMutateChildContainerAfterInsert, + arrayMutateChildContainerAfterSet: - if array.Count() == 0 { - continue + if nestedLevels-1 > 0 { + return nextOp, nil } - k := r.Intn(int(array.Count())) + // New child container can't be created because next nestedLevels is 0. + // Try another array operation. - nestedLevels := r.Intn(maxNestedLevels) - v, err := randomValue(storage, address, nestedLevels) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err) - return + case arrayMutateChildContainerAfterGet: + if hasChildContainerInArray(expectedValues) { + return nextOp, nil } - copiedValue, err := copyValue(storage, atree.Address{}, v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", v, err) - return - } + // Array doesn't have child container, try another array operation. 
- oldV := values[k] + default: + return nextOp, nil + } + } +} - // Update values - values[k] = copiedValue +func modifyArray( + expectedValues arrayValue, + array *atree.Array, + nestedLevels int, + forceRemove bool, +) (arrayValue, arrayOpType, error) { - // Update array - existingStorable, err := array.Set(uint64(k), v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", v, k, err) - return - } + storage := array.Storage + address := array.Address() - // Compare overwritten value from array with overwritten value from values - existingValue, err := existingStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingStorable, err) - return - } + nextOp, err := nextArrayOp(expectedValues, array, nestedLevels, forceRemove) + if err != nil { + return nil, 0, err + } - err = valueEqual(oldV, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldV, err) - return - } + switch nextOp { + case arrayAppendOp, arrayMutateChildContainerAfterAppend: + + var nextNestedLevels int + + switch nextOp { + case arrayAppendOp: + nextNestedLevels = r.Intn(nestedLevels) + case arrayMutateChildContainerAfterAppend: + nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") + } - // Delete overwritten element from storage - err = removeStorable(storage, existingStorable) + // Create new child + expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err) + } + + // Update expectedValues + expectedValues = append(expectedValues, expectedChildValue) + + // Update array + err = array.Append(child) + if err != nil { + return nil, 0, fmt.Errorf("failed to append %s: %s", child, err) + } + + if nextOp == arrayMutateChildContainerAfterAppend { + index := len(expectedValues) - 1 + + expectedValues[index], err = 
modifyContainer(expectedValues[index], child, nextNestedLevels) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove storable %s: %s", existingStorable, err) - return + return nil, 0, fmt.Errorf("failed to modify child container at index %d: %w", index, err) } + } + + case arraySetOp, arrayMutateChildContainerAfterSet: + + var nextNestedLevels int + + switch nextOp { + case arraySetOp: + nextNestedLevels = r.Intn(nestedLevels) + case arrayMutateChildContainerAfterSet: + nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") + } + + // Create new child + expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err) + } + + index := r.Intn(int(array.Count())) + + oldExpectedValue := expectedValues[index] + + // Update expectedValues + expectedValues[index] = expectedChildValue + + // Update array + existingStorable, err := array.Set(uint64(index), child) + if err != nil { + return nil, 0, fmt.Errorf("failed to set %s at index %d: %s", child, index, err) + } + + // Compare overwritten value from array with overwritten value from expectedValues + existingValue, err := existingStorable.StoredValue(storage) + if err != nil { + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingStorable, err) + } + + err = valueEqual(oldExpectedValue, existingValue) + if err != nil { + return nil, 0, fmt.Errorf("failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) + } + + // Delete overwritten element from storage + err = removeStorable(storage, existingStorable) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove storable %s: %s", existingStorable, err) + } - err = removeValue(storage, oldV) + if nextOp == arrayMutateChildContainerAfterSet { + expectedValues[index], err = modifyContainer(expectedValues[index], child, nextNestedLevels) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to 
remove copied overwritten value %s: %s", oldV, err) - return + return nil, 0, fmt.Errorf("failed to modify child container at index %d: %w", index, err) } + } + + case arrayInsertOp, arrayMutateChildContainerAfterInsert: - // Update status - status.incSet() + var nextNestedLevels int + switch nextOp { case arrayInsertOp: - opCount++ + nextNestedLevels = r.Intn(nestedLevels) + case arrayMutateChildContainerAfterInsert: + nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") + } - k := r.Intn(int(array.Count() + 1)) + // Create new child + expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err) + } - nestedLevels := r.Intn(maxNestedLevels) - v, err := randomValue(storage, address, nestedLevels) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err) - return - } + index := r.Intn(int(array.Count() + 1)) - copiedValue, err := copyValue(storage, atree.Address{}, v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", v, err) - return - } + // Update expectedValues + if index == int(array.Count()) { + expectedValues = append(expectedValues, expectedChildValue) + } else { + expectedValues = append(expectedValues, nil) + copy(expectedValues[index+1:], expectedValues[index:]) + expectedValues[index] = expectedChildValue + } - // Update values - if k == int(array.Count()) { - values = append(values, copiedValue) - } else { - values = append(values, nil) - copy(values[k+1:], values[k:]) - values[k] = copiedValue - } + // Update array + err = array.Insert(uint64(index), child) + if err != nil { + return nil, 0, fmt.Errorf("failed to insert %s into index %d: %s", child, index, err) + } - // Update array - err = array.Insert(uint64(k), v) + if nextOp == arrayMutateChildContainerAfterInsert { + expectedValues[index], err = modifyContainer(expectedValues[index], child, 
nextNestedLevels) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to insert %s into index %d: %s", v, k, err) - return + return nil, 0, fmt.Errorf("failed to modify child container at index %d: %w", index, err) } + } - // Update status - status.incInsert() + case arrayRemoveOp: + index := r.Intn(int(array.Count())) - case arrayRemoveOp: - if array.Count() == 0 { - continue - } + oldExpectedValue := expectedValues[index] - opCount++ + // Update expectedValues + copy(expectedValues[index:], expectedValues[index+1:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - k := r.Intn(int(array.Count())) + // Update array + existingStorable, err := array.Remove(uint64(index)) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove element at index %d: %s", index, err) + } - oldV := values[k] + // Compare removed value from array with removed value from values + existingValue, err := existingStorable.StoredValue(storage) + if err != nil { + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingStorable, err) + } - // Update values - copy(values[k:], values[k+1:]) - values[len(values)-1] = nil - values = values[:len(values)-1] + err = valueEqual(oldExpectedValue, existingValue) + if err != nil { + return nil, 0, fmt.Errorf("failed to compare %s and %s: %s", existingValue, oldExpectedValue, err) + } - // Update array - existingStorable, err := array.Remove(uint64(k)) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove element at index %d: %s", k, err) - return - } + // Delete removed element from storage + err = removeStorable(storage, existingStorable) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove element %s: %s", existingStorable, err) + } - // Compare removed value from array with removed value from values - existingValue, err := existingStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingStorable, err) 
- return - } + case arrayMutateChildContainerAfterGet: + index, found := getRandomChildContainerIndexInArray(expectedValues) + if !found { + // arrayMutateChildContainerAfterGet op can't be performed because there isn't any child container in this array. + // Try another array operation. + return modifyArray(expectedValues, array, nestedLevels, forceRemove) + } - err = valueEqual(oldV, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to compare %s and %s: %s", existingValue, oldV, err) - return - } + child, err := array.Get(uint64(index)) + if err != nil { + return nil, 0, fmt.Errorf("failed to get element from array at index %d: %s", index, err) + } - // Delete removed element from storage - err = removeStorable(storage, existingStorable) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove element %s: %s", existingStorable, err) - return - } + expectedValues[index], err = modifyContainer(expectedValues[index], child, nestedLevels-1) + if err != nil { + return nil, 0, fmt.Errorf("failed to modify child container at index %d: %w", index, err) + } + } - err = removeValue(storage, oldV) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied removed value %s: %s", oldV, err) - return - } + return expectedValues, nextOp, nil +} + +func modifyContainer(expectedValue atree.Value, value atree.Value, nestedLevels int) (expected atree.Value, err error) { - // Update status - status.incRemove() + switch value := value.(type) { + case *atree.Array: + expectedArrayValue, ok := expectedValue.(arrayValue) + if !ok { + return nil, fmt.Errorf("failed to get expected value of type arrayValue: got %T", expectedValue) } - // Check array elements against values after every op - err = checkArrayDataLoss(array, values) + expectedValue, _, err = modifyArray(expectedArrayValue, value, nestedLevels, false) if err != nil { - fmt.Fprintln(os.Stderr, err) - return + return nil, err } - if opCount >= 100 { - opCount = 0 - rootIDs, err := 
atree.CheckStorageHealth(storage, -1) - if err != nil { - fmt.Fprintln(os.Stderr, err) - return - } - ids := make([]atree.SlabID, 0, len(rootIDs)) - for id := range rootIDs { - // filter out root ids with empty address - if !id.HasTempAddress() { - ids = append(ids, id) - } - } - if len(ids) != 1 || ids[0] != array.SlabID() { - fmt.Fprintf(os.Stderr, "root slab ids %v in storage, want %s\n", ids, array.SlabID()) - return - } + case *atree.OrderedMap: + expectedMapValue, ok := expectedValue.(mapValue) + if !ok { + return nil, fmt.Errorf("failed to get expected value of type mapValue: got %T", expectedValue) + } + + expectedValue, _, err = modifyMap(expectedMapValue, value, nestedLevels, false) + if err != nil { + return nil, err } + + default: + return nil, fmt.Errorf("failed to get container: got %T", value) } + + return expectedValue, nil } -func checkArrayDataLoss(array *atree.Array, values []atree.Value) error { +func hasChildContainerInArray(expectedValues arrayValue) bool { + for _, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + return true + } + } + return false +} + +func getRandomChildContainerIndexInArray(expectedValues arrayValue) (index int, found bool) { + indexes := make([]int, 0, len(expectedValues)) + for i, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + indexes = append(indexes, i) + } + } + if len(indexes) == 0 { + return 0, false + } + return indexes[r.Intn(len(indexes))], true +} + +func checkStorageHealth(storage *atree.PersistentSlabStorage, rootSlabID atree.SlabID) bool { + rootIDs, err := atree.CheckStorageHealth(storage, -1) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + + // Filter out slabs with temp address because + // child array/map values used for data loss check is stored with temp address. 
+ ids := make([]atree.SlabID, 0, len(rootIDs)) + for id := range rootIDs { + // filter out root ids with empty address + if !id.HasTempAddress() { + ids = append(ids, id) + } + } + + if len(ids) != 1 || ids[0] != rootSlabID { + fmt.Fprintf(os.Stderr, "root slab ids %v in storage, want %s\n", ids, rootSlabID) + return false + } + + return true +} + +func checkArrayDataLoss(expectedValues arrayValue, array *atree.Array) error { // Check array has the same number of elements as values - if array.Count() != uint64(len(values)) { - return fmt.Errorf("Count() %d != len(values) %d", array.Count(), len(values)) + if array.Count() != uint64(len(expectedValues)) { + return fmt.Errorf("Count() %d != len(values) %d", array.Count(), len(expectedValues)) } // Check every element - for i, v := range values { + for i, v := range expectedValues { convertedValue, err := array.Get(uint64(i)) if err != nil { return fmt.Errorf("failed to get element at %d: %w", i, err) @@ -427,5 +601,30 @@ func checkArrayDataLoss(array *atree.Array, values []atree.Value) error { } } + if flagCheckSlabEnabled { + err := checkArraySlab(array) + if err != nil { + return err + } + } + return nil } + +func checkArraySlab(array *atree.Array) error { + err := atree.VerifyArray(array, array.Address(), array.Type(), typeInfoComparator, hashInputProvider, true) + if err != nil { + return err + } + + return atree.VerifyArraySerialization( + array, + cborDecMode, + cborEncMode, + decodeStorable, + decodeTypeInfo, + func(a, b atree.Storable) bool { + return reflect.DeepEqual(a, b) + }, + ) +} diff --git a/cmd/stress/main.go b/cmd/stress/main.go index b9e14c6..fbd735b 100644 --- a/cmd/stress/main.go +++ b/cmd/stress/main.go @@ -70,25 +70,47 @@ func updateStatus(sigc <-chan os.Signal, status Status) { } } -func main() { +var cborEncMode = func() cbor.EncMode { + encMode, err := cbor.EncOptions{}.EncMode() + if err != nil { + panic(fmt.Sprintf("Failed to create CBOR encoding mode: %s", err)) + } + return encMode +}() 
+ +var cborDecMode = func() cbor.DecMode { + decMode, err := cbor.DecOptions{}.DecMode() + if err != nil { + panic(fmt.Sprintf("Failed to create CBOR decoding mode: %s\n", err)) + } + return decMode +}() + +var ( + flagType string + flagCheckSlabEnabled bool + flagMaxLength uint64 + flagSeedHex string + flagMinHeapAllocMiB, flagMaxHeapAllocMiB uint64 + flagMinOpsForStorageHealthCheck uint64 +) - var typ string - var maxLength uint64 - var seedHex string - var minHeapAllocMiB, maxHeapAllocMiB uint64 +func main() { - flag.StringVar(&typ, "type", "array", "array or map") - flag.Uint64Var(&maxLength, "maxlen", 10_000, "max number of elements") - flag.StringVar(&seedHex, "seed", "", "seed for prng in hex (default is Unix time)") - flag.Uint64Var(&minHeapAllocMiB, "minheap", 1000, "min HeapAlloc in MiB to stop extra removal of elements") - flag.Uint64Var(&maxHeapAllocMiB, "maxheap", 2000, "max HeapAlloc in MiB to trigger extra removal of elements") + flag.StringVar(&flagType, "type", "array", "array or map") + flag.BoolVar(&flagCheckSlabEnabled, "slabcheck", false, "in memory and serialized slab check") + flag.Uint64Var(&flagMinOpsForStorageHealthCheck, "minOpsForStorageHealthCheck", 100, "number of operations for storage health check") + flag.Uint64Var(&flagMaxLength, "maxlen", 10_000, "max number of elements") + flag.StringVar(&flagSeedHex, "seed", "", "seed for prng in hex (default is Unix time)") + flag.Uint64Var(&flagMinHeapAllocMiB, "minheap", 1000, "min HeapAlloc in MiB to stop extra removal of elements") + flag.Uint64Var(&flagMaxHeapAllocMiB, "maxheap", 2000, "max HeapAlloc in MiB to trigger extra removal of elements") flag.Parse() var seed int64 - if len(seedHex) != 0 { + if len(flagSeedHex) != 0 { var err error - seed, err = strconv.ParseInt(strings.ReplaceAll(seedHex, "0x", ""), 16, 64) + seed, err = strconv.ParseInt(strings.ReplaceAll(flagSeedHex, "0x", ""), 16, 64) if err != nil { panic("Failed to parse seed flag (hex string)") } @@ -96,9 +118,9 @@ func 
main() { r = newRand(seed) - typ = strings.ToLower(typ) + flagType = strings.ToLower(flagType) - if typ != "array" && typ != "map" { + if flagType != "array" && flagType != "map" { fmt.Fprintf(os.Stderr, "Please specify type as either \"array\" or \"map\"") return } @@ -106,52 +128,49 @@ func main() { sigc := make(chan os.Signal, 1) signal.Notify(sigc, os.Interrupt, syscall.SIGTERM) - // Create storage - encMode, err := cbor.EncOptions{}.EncMode() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create CBOR encoding mode: %s\n", err) - return - } - - decMode, err := cbor.DecOptions{}.DecMode() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to create CBOR decoding mode: %s\n", err) - return - } - baseStorage := NewInMemBaseStorage() storage := atree.NewPersistentSlabStorage( baseStorage, - encMode, - decMode, + cborEncMode, + cborDecMode, decodeStorable, decodeTypeInfo, ) - typeInfo := testTypeInfo{value: 123} - address := atree.Address{1, 2, 3, 4, 5, 6, 7, 8} - switch typ { + switch flagType { case "array": - fmt.Printf("Starting array stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", minHeapAllocMiB, maxHeapAllocMiB) + var msg string + if flagCheckSlabEnabled { + msg = fmt.Sprintf("Starting array stress test with slab check, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } else { + msg = fmt.Sprintf("Starting array stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } + fmt.Println(msg) status := newArrayStatus() go updateStatus(sigc, status) - testArray(storage, address, typeInfo, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) + testArray(storage, address, status) case "map": - fmt.Printf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB\n", minHeapAllocMiB, maxHeapAllocMiB) + var msg string + if flagCheckSlabEnabled { + msg = fmt.Sprintf("Starting map stress test with slab check, minMapHeapAlloc 
= %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } else { + msg = fmt.Sprintf("Starting map stress test, minMapHeapAlloc = %d MiB, maxMapHeapAlloc = %d MiB", flagMinHeapAllocMiB, flagMaxHeapAllocMiB) + } + fmt.Println(msg) status := newMapStatus() go updateStatus(sigc, status) - testMap(storage, address, typeInfo, maxLength, status, minHeapAllocMiB, maxHeapAllocMiB) + testMap(storage, address, status) } } diff --git a/cmd/stress/map.go b/cmd/stress/map.go index a150f8f..0e11b62 100644 --- a/cmd/stress/map.go +++ b/cmd/stress/map.go @@ -21,6 +21,7 @@ package main import ( "fmt" "os" + "reflect" "runtime" "sync" "time" @@ -28,11 +29,15 @@ import ( "github.com/onflow/atree" ) +type mapOpType int + const ( - mapSetOp1 = iota + mapSetOp1 mapOpType = iota mapSetOp2 mapSetOp3 mapRemoveOp + mapMutateChildContainerAfterGet + mapMutateChildContainerAfterSet maxMapOp ) @@ -43,8 +48,10 @@ type mapStatus struct { count uint64 // number of elements in map - setOps uint64 - removeOps uint64 + setOps uint64 + removeOps uint64 + mutateChildContainerAfterGetOps uint64 + mutateChildContainerAfterSetOps uint64 } var _ Status = &mapStatus{} @@ -62,32 +69,36 @@ func (status *mapStatus) String() string { var m runtime.MemStats runtime.ReadMemStats(&m) - return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d sets, %d removes", + return fmt.Sprintf("duration %s, heapAlloc %d MiB, %d elements, %d sets, %d removes, %d Get mutations, %d Set mutations", duration.Truncate(time.Second).String(), m.Alloc/1024/1024, status.count, status.setOps, status.removeOps, + status.mutateChildContainerAfterGetOps, + status.mutateChildContainerAfterSetOps, ) } -func (status *mapStatus) incSet(newValue bool) { +func (status *mapStatus) incOp(op mapOpType, newTotalCount uint64) { status.lock.Lock() defer status.lock.Unlock() - status.setOps++ + switch op { + case mapSetOp1, mapSetOp2, mapSetOp3: + status.setOps++ - if newValue { - status.count++ - } -} + case 
mapRemoveOp: + status.removeOps++ -func (status *mapStatus) incRemove() { - status.lock.Lock() - defer status.lock.Unlock() + case mapMutateChildContainerAfterGet: + status.mutateChildContainerAfterGetOps++ - status.removeOps++ - status.count-- + case mapMutateChildContainerAfterSet: + status.mutateChildContainerAfterSetOps++ + } + + status.count = newTotalCount } func (status *mapStatus) Write() { @@ -97,12 +108,9 @@ func (status *mapStatus) Write() { func testMap( storage *atree.PersistentSlabStorage, address atree.Address, - typeInfo atree.TypeInfo, - maxLength uint64, status *mapStatus, - minHeapAllocMiB uint64, - maxHeapAllocMiB uint64, ) { + typeInfo := newMapTypeInfo() m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), typeInfo) if err != nil { @@ -110,15 +118,12 @@ func testMap( return } - // elements contains generated keys and values. It is used to check data loss. - elements := make(map[atree.Value]atree.Value, maxLength) - - // keys contains generated keys. It is used to select random keys for removal. - keys := make([]atree.Value, 0, maxLength) + // expectedValues contains generated keys and values. It is used to check data loss. 
+ expectedValues := make(mapValue, flagMaxLength) reduceHeapAllocs := false - opCount := uint64(0) + opCountForStorageHealthCheck := uint64(0) var ms runtime.MemStats @@ -126,10 +131,10 @@ func testMap( runtime.ReadMemStats(&ms) allocMiB := ms.Alloc / 1024 / 1024 - if !reduceHeapAllocs && allocMiB > maxHeapAllocMiB { + if !reduceHeapAllocs && allocMiB > flagMaxHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, removing elements to reduce allocs...\n", allocMiB) reduceHeapAllocs = true - } else if reduceHeapAllocs && allocMiB < minHeapAllocMiB { + } else if reduceHeapAllocs && allocMiB < flagMinHeapAllocMiB { fmt.Printf("\nHeapAlloc is %d MiB, resuming random operation...\n", allocMiB) reduceHeapAllocs = false } @@ -148,7 +153,7 @@ func testMap( storage.DropDeltas() storage.DropCache() - elements = make(map[atree.Value]atree.Value, maxLength) + expectedValues = make(map[atree.Value]atree.Value, flagMaxLength) // Load root slab from storage and cache it in read cache rootID := m.SlabID() @@ -166,230 +171,292 @@ func testMap( fmt.Printf("\nHeapAlloc is %d MiB after cleanup and forced gc\n", allocMiB) // Prevent infinite loop that doesn't do useful work. - if allocMiB > maxHeapAllocMiB { + if allocMiB > flagMaxHeapAllocMiB { // This shouldn't happen unless there's a memory leak. 
fmt.Fprintf( os.Stderr, "Exiting because allocMiB %d > maxMapHeapAlloMiB %d with empty map\n", allocMiB, - maxHeapAllocMiB) + flagMaxHeapAllocMiB) return } } - nextOp := r.Intn(maxMapOp) + var forceRemove bool + if m.Count() == flagMaxLength || reduceHeapAllocs { + forceRemove = true + } - if m.Count() == maxLength || reduceHeapAllocs { - nextOp = mapRemoveOp + var prevOp mapOpType + expectedValues, prevOp, err = modifyMap(expectedValues, m, maxNestedLevels, forceRemove) + if err != nil { + fmt.Fprint(os.Stderr, err.Error()) + return } - switch nextOp { + opCountForStorageHealthCheck++ - case mapSetOp1, mapSetOp2, mapSetOp3: - opCount++ + // Update status + status.incOp(prevOp, m.Count()) - k, err := randomKey() - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random key %s: %s", k, err) - return - } + // Check map elements against elements after every op + err = checkMapDataLoss(expectedValues, m) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return + } - nestedLevels := r.Intn(maxNestedLevels) - v, err := randomValue(storage, address, nestedLevels) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate random value %s: %s", v, err) - return - } + if opCountForStorageHealthCheck >= flagMinOpsForStorageHealthCheck { + opCountForStorageHealthCheck = 0 - copiedKey, err := copyValue(storage, atree.Address{}, k) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random key %s: %s", k, err) + if !checkStorageHealth(storage, m.SlabID()) { return } - copiedValue, err := copyValue(storage, atree.Address{}, v) + // Commit slabs to storage so slabs are encoded and then decoded at next op. + err = storage.FastCommit(runtime.NumCPU()) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to copy random value %s: %s", k, err) + fmt.Fprintf(os.Stderr, "Failed to commit to storage: %s", err) return } - oldV := elements[copiedKey] + // Drop cache after commit to force slab decoding at next op. 
+ storage.DropCache() + } + } +} - // Update keys - if oldV == nil { - keys = append(keys, copiedKey) - } +func nextMapOp( + expectedValues mapValue, + m *atree.OrderedMap, + nestedLevels int, + forceRemove bool, +) (mapOpType, error) { - // Update elements - elements[copiedKey] = copiedValue + if forceRemove { + if m.Count() == 0 { + return 0, fmt.Errorf("failed to force remove map elements because map has no elements") + } + return mapRemoveOp, nil + } - // Update map - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to set %s at index %d: %s", v, k, err) - return - } + if m.Count() == 0 { + return mapSetOp1, nil + } - // Compare old value from map with old value from elements - if (oldV == nil) && (existingStorable != nil) { - fmt.Fprintf(os.Stderr, "Set returned storable %s, want nil", existingStorable) - return - } + for { + nextOp := mapOpType(r.Intn(int(maxMapOp))) - if (oldV != nil) && (existingStorable == nil) { - fmt.Fprintf(os.Stderr, "Set returned nil, want %s", oldV) - return + switch nextOp { + case mapMutateChildContainerAfterSet: + if nestedLevels-1 > 0 { + return nextOp, nil } - if existingStorable != nil { - - existingValue, err := existingStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingStorable, err) - return - } - - err = valueEqual(oldV, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Set() returned wrong existing value %s, want %s", existingValue, oldV) - return - } - - err = removeStorable(storage, existingStorable) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove map storable element %s: %s", existingStorable, err) - return - } - - err = removeValue(storage, oldV) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied overwritten value %s: %s", existingValue, err) - return - } + // New child container can't be created because next nestedLevels is 0. 
+ // Try another map operation. + + case mapMutateChildContainerAfterGet: + if hasChildContainerInMap(expectedValues) { + return nextOp, nil } - // Update status - status.incSet(oldV == nil) + // Map doesn't have child container, try another map operation. - case mapRemoveOp: - if m.Count() == 0 { - continue - } + default: + return nextOp, nil + } + } +} - opCount++ +func modifyMap( + expectedValues mapValue, + m *atree.OrderedMap, + nestedLevels int, + forceRemove bool, +) (mapValue, mapOpType, error) { - index := r.Intn(len(keys)) - k := keys[index] + storage := m.Storage + address := m.Address() - oldV := elements[k] + nextOp, err := nextMapOp(expectedValues, m, nestedLevels, forceRemove) + if err != nil { + return nil, 0, err + } - // Update elements - delete(elements, k) + switch nextOp { + case mapSetOp1, mapSetOp2, mapSetOp3, mapMutateChildContainerAfterSet: - // Update keys - copy(keys[index:], keys[index+1:]) - keys[len(keys)-1] = nil - keys = keys[:len(keys)-1] + var nextNestedLevels int - // Update map - existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, k) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove element with key %s: %s", k, err) - return - } + switch nextOp { + case mapSetOp1, mapSetOp2, mapSetOp3: + nextNestedLevels = r.Intn(nestedLevels) + case mapMutateChildContainerAfterSet: + nextNestedLevels = nestedLevels - 1 + default: + panic("not reachable") + } - // Compare removed key from map with removed key from elements - existingKeyValue, err := existingKeyStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingKeyStorable, err) - return - } + expectedKey, key, err := randomKey() + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random key %s: %s", key, err) + } - err = valueEqual(k, existingKeyValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Remove() returned wrong existing key %s, want %s", existingKeyStorable, k) - 
return - } + expectedChildValue, child, err := randomValue(storage, address, nextNestedLevels) + if err != nil { + return nil, 0, fmt.Errorf("failed to generate random value %s: %s", child, err) + } - // Compare removed value from map with removed value from elements - existingValue, err := existingValueStorable.StoredValue(storage) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to convert %s to value: %s", existingValueStorable, err) - return - } + oldExpectedValue := expectedValues[expectedKey] - err = valueEqual(oldV, existingValue) - if err != nil { - fmt.Fprintf(os.Stderr, "Remove() returned wrong existing value %s, want %s", existingValueStorable, oldV) - return - } + // Update expectedValues + expectedValues[expectedKey] = expectedChildValue + + // Update map + existingStorable, err := m.Set(compare, hashInputProvider, key, child) + if err != nil { + return nil, 0, fmt.Errorf("failed to set %s at index %d: %s", child, key, err) + } - err = removeStorable(storage, existingKeyStorable) + // Compare old value from map with old value from elements + if (oldExpectedValue == nil) != (existingStorable == nil) { + return nil, 0, fmt.Errorf("Set returned storable %s != expected %s", existingStorable, oldExpectedValue) + } + + if existingStorable != nil { + + existingValue, err := existingStorable.StoredValue(storage) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove key %s: %s", existingKeyStorable, err) - return + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingStorable, err) } - err = removeStorable(storage, existingValueStorable) + err = valueEqual(oldExpectedValue, existingValue) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove value %s: %s", existingValueStorable, err) - return + return nil, 0, fmt.Errorf("Set() returned wrong existing value %s, want %s", existingValue, oldExpectedValue) } - err = removeValue(storage, k) + // Delete removed element from storage + err = removeStorable(storage, existingStorable) if err 
!= nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied key %s: %s", k, err) - return + return nil, 0, fmt.Errorf("failed to remove map storable element %s: %s", existingStorable, err) } + } - err = removeValue(storage, oldV) + if nextOp == mapMutateChildContainerAfterSet { + expectedValues[expectedKey], err = modifyContainer(expectedValues[expectedKey], child, nextNestedLevels) if err != nil { - fmt.Fprintf(os.Stderr, "Failed to remove copied value %s: %s", existingValue, err) - return + return nil, 0, fmt.Errorf("failed to modify child container at key %s: %w", expectedKey, err) } + } - // Update status - status.incRemove() + case mapRemoveOp: + // Use for-range on Go map to get random key + var key atree.Value + for k := range expectedValues { + key = k + break } - // Check map elements against elements after every op - err = checkMapDataLoss(m, elements) + oldExpectedValue := expectedValues[key] + + // Update expectedValues + delete(expectedValues, key) + + // Update map + existingKeyStorable, existingValueStorable, err := m.Remove(compare, hashInputProvider, key) if err != nil { - fmt.Fprintln(os.Stderr, err) - return + return nil, 0, fmt.Errorf("failed to remove element with key %s: %s", key, err) } - if opCount >= 100 { - opCount = 0 - rootIDs, err := atree.CheckStorageHealth(storage, -1) - if err != nil { - fmt.Fprintln(os.Stderr, err) - return - } - ids := make([]atree.SlabID, 0, len(rootIDs)) - for id := range rootIDs { - // filter out root ids with empty address - if !id.HasTempAddress() { - ids = append(ids, id) - } - } - if len(ids) != 1 || ids[0] != m.SlabID() { - fmt.Fprintf(os.Stderr, "root slab ids %v in storage, want %s\n", ids, m.SlabID()) - return - } + // Compare removed key from map with removed key from elements + existingKeyValue, err := existingKeyStorable.StoredValue(storage) + if err != nil { + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingKeyStorable, err) + } + + err = valueEqual(key, existingKeyValue) + if 
err != nil { + return nil, 0, fmt.Errorf("Remove() returned wrong existing key %s, want %s", existingKeyStorable, key) + } + + // Compare removed value from map with removed value from elements + existingValue, err := existingValueStorable.StoredValue(storage) + if err != nil { + return nil, 0, fmt.Errorf("failed to convert %s to value: %s", existingValueStorable, err) + } + + err = valueEqual(oldExpectedValue, existingValue) + if err != nil { + return nil, 0, fmt.Errorf("Remove() returned wrong existing value %s, want %s", existingValueStorable, oldExpectedValue) + } + + // Delete removed element from storage + err = removeStorable(storage, existingKeyStorable) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove key %s: %s", existingKeyStorable, err) + } + + err = removeStorable(storage, existingValueStorable) + if err != nil { + return nil, 0, fmt.Errorf("failed to remove value %s: %s", existingValueStorable, err) + } + + case mapMutateChildContainerAfterGet: + key, found := getRandomChildContainerKeyInMap(expectedValues) + if !found { + // mapMutateChildContainerAfterGet op can't be performed because there isn't any child container in this map. + // Try another map operation. 
+ return modifyMap(expectedValues, m, nestedLevels, forceRemove) + } + + child, err := m.Get(compare, hashInputProvider, key) + if err != nil { + return nil, 0, fmt.Errorf("failed to get element from map at key %s: %s", key, err) + } + + expectedValues[key], err = modifyContainer(expectedValues[key], child, nestedLevels-1) + if err != nil { + return nil, 0, fmt.Errorf("failed to modify child container at key %s: %w", key, err) + } + } + + return expectedValues, nextOp, nil +} + +func hasChildContainerInMap(expectedValues mapValue) bool { + for _, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + return true } } + return false } -func checkMapDataLoss(m *atree.OrderedMap, elements map[atree.Value]atree.Value) error { +func getRandomChildContainerKeyInMap(expectedValues mapValue) (key atree.Value, found bool) { + keys := make([]atree.Value, 0, len(expectedValues)) + for k, v := range expectedValues { + switch v.(type) { + case arrayValue, mapValue: + keys = append(keys, k) + } + } + if len(keys) == 0 { + return nil, false + } + return keys[r.Intn(len(keys))], true +} + +func checkMapDataLoss(expectedValues mapValue, m *atree.OrderedMap) error { // Check map has the same number of elements as elements - if m.Count() != uint64(len(elements)) { - return fmt.Errorf("Count() %d != len(values) %d", m.Count(), len(elements)) + if m.Count() != uint64(len(expectedValues)) { + return fmt.Errorf("Count() %d != len(values) %d", m.Count(), len(expectedValues)) } // Check every element - for k, v := range elements { + for k, v := range expectedValues { convertedValue, err := m.Get(compare, hashInputProvider, k) if err != nil { return fmt.Errorf("failed to get element with key %s: %w", k, err) @@ -400,5 +467,30 @@ func checkMapDataLoss(m *atree.OrderedMap, elements map[atree.Value]atree.Value) } } + if flagCheckSlabEnabled { + err := checkMapSlab(m) + if err != nil { + return err + } + } + return nil } + +func checkMapSlab(m *atree.OrderedMap) error { + 
err := atree.VerifyMap(m, m.Address(), m.Type(), typeInfoComparator, hashInputProvider, true) + if err != nil { + return err + } + + return atree.VerifyMapSerialization( + m, + cborDecMode, + cborEncMode, + decodeStorable, + decodeTypeInfo, + func(a, b atree.Storable) bool { + return reflect.DeepEqual(a, b) + }, + ) +} diff --git a/cmd/stress/storable.go b/cmd/stress/storable.go index 1d66472..fcd57b6 100644 --- a/cmd/stress/storable.go +++ b/cmd/stress/storable.go @@ -413,7 +413,7 @@ func (v StringValue) String() string { return v.str } -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, id atree.SlabID, inlinedExtraData []atree.ExtraData) (atree.Storable, error) { t, err := dec.NextType() if err != nil { return nil, err @@ -435,6 +435,15 @@ func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, er switch tagNumber { + case atree.CBORTagInlinedArray: + return atree.DecodeInlinedArrayStorable(dec, decodeStorable, id, inlinedExtraData) + + case atree.CBORTagInlinedMap: + return atree.DecodeInlinedMapStorable(dec, decodeStorable, id, inlinedExtraData) + + case atree.CBORTagInlinedCompactMap: + return atree.DecodeInlinedCompactMapStorable(dec, decodeStorable, id, inlinedExtraData) + case atree.CBORTagSlabID: return atree.DecodeSlabIDStorable(dec) diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go index c9ee423..b3c8120 100644 --- a/cmd/stress/typeinfo.go +++ b/cmd/stress/typeinfo.go @@ -19,31 +19,231 @@ package main import ( + "fmt" + "github.com/onflow/atree" "github.com/fxamacker/cbor/v2" ) -type testTypeInfo struct { - value uint64 +const ( + maxArrayTypeValue = 10 + maxMapTypeValue = 10 + + arrayTypeTagNum = 246 + mapTypeTagNum = 245 + compositeTypeTagNum = 244 +) + +type arrayTypeInfo struct { + value int +} + +func newArrayTypeInfo() arrayTypeInfo { + return arrayTypeInfo{value: r.Intn(maxArrayTypeValue)} +} + +var _ atree.TypeInfo = arrayTypeInfo{} 
+ +func (i arrayTypeInfo) Copy() atree.TypeInfo { + return i +} + +func (i arrayTypeInfo) IsComposite() bool { + return false +} + +func (i arrayTypeInfo) Identifier() string { + return fmt.Sprintf("array(%d)", i) +} + +func (i arrayTypeInfo) Encode(e *cbor.StreamEncoder) error { + err := e.EncodeTagHead(arrayTypeTagNum) + if err != nil { + return err + } + return e.EncodeInt64(int64(i.value)) +} + +func (i arrayTypeInfo) Equal(other atree.TypeInfo) bool { + otherArrayTypeInfo, ok := other.(arrayTypeInfo) + return ok && i.value == otherArrayTypeInfo.value +} + +type mapTypeInfo struct { + value int +} + +var _ atree.TypeInfo = mapTypeInfo{} + +func newMapTypeInfo() mapTypeInfo { + return mapTypeInfo{value: r.Intn(maxMapTypeValue)} +} + +func (i mapTypeInfo) Copy() atree.TypeInfo { + return i +} + +func (i mapTypeInfo) IsComposite() bool { + return false +} + +func (i mapTypeInfo) Identifier() string { + return fmt.Sprintf("map(%d)", i) +} + +func (i mapTypeInfo) Encode(e *cbor.StreamEncoder) error { + err := e.EncodeTagHead(mapTypeTagNum) + if err != nil { + return err + } + return e.EncodeInt64(int64(i.value)) +} + +func (i mapTypeInfo) Equal(other atree.TypeInfo) bool { + otherMapTypeInfo, ok := other.(mapTypeInfo) + return ok && i.value == otherMapTypeInfo.value +} + +var compositeFieldNames = []string{"a", "b", "c"} + +type compositeTypeInfo struct { + fieldStartIndex int // inclusive start index of fieldNames + fieldEndIndex int // exclusive end index of fieldNames +} + +var _ atree.TypeInfo = mapTypeInfo{} + +// newCompositeTypeInfo creates one of 10 compositeTypeInfo randomly. 
+// 10 possible composites: +// - ID: composite(0_0), field names: [] +// - ID: composite(0_1), field names: ["a"] +// - ID: composite(0_2), field names: ["a", "b"] +// - ID: composite(0_3), field names: ["a", "b", "c"] +// - ID: composite(1_1), field names: [] +// - ID: composite(1_2), field names: ["b"] +// - ID: composite(1_3), field names: ["b", "c"] +// - ID: composite(2_2), field names: [] +// - ID: composite(2_3), field names: ["c"] +// - ID: composite(3_3), field names: [] +func newCompositeTypeInfo() compositeTypeInfo { + // startIndex is [0, 3] + startIndex := r.Intn(len(compositeFieldNames) + 1) + + // count is [0, 3] + count := r.Intn(len(compositeFieldNames) - startIndex + 1) + + endIndex := startIndex + count + if endIndex > len(compositeFieldNames) { + panic("not reachable") + } + + return compositeTypeInfo{fieldStartIndex: startIndex, fieldEndIndex: endIndex} } -var _ atree.TypeInfo = testTypeInfo{} +func (i compositeTypeInfo) getFieldNames() []string { + return compositeFieldNames[i.fieldStartIndex:i.fieldEndIndex] +} + +func (i compositeTypeInfo) Copy() atree.TypeInfo { + return i +} -func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error { - return e.EncodeUint64(i.value) +func (i compositeTypeInfo) IsComposite() bool { + return true } -func (i testTypeInfo) Equal(other atree.TypeInfo) bool { - otherTestTypeInfo, ok := other.(testTypeInfo) - return ok && i.value == otherTestTypeInfo.value +func (i compositeTypeInfo) Identifier() string { + return fmt.Sprintf("composite(%d_%d)", i.fieldStartIndex, i.fieldEndIndex) +} + +func (i compositeTypeInfo) Encode(e *cbor.StreamEncoder) error { + err := e.EncodeTagHead(compositeTypeTagNum) + if err != nil { + return err + } + err = e.EncodeArrayHead(2) + if err != nil { + return err + } + err = e.EncodeInt64(int64(i.fieldStartIndex)) + if err != nil { + return err + } + return e.EncodeInt64(int64(i.fieldEndIndex)) +} + +func (i compositeTypeInfo) Equal(other atree.TypeInfo) bool { + 
otherCompositeTypeInfo, ok := other.(compositeTypeInfo) + if !ok { + return false + } + return i.fieldStartIndex == otherCompositeTypeInfo.fieldStartIndex && + i.fieldEndIndex == otherCompositeTypeInfo.fieldEndIndex } func decodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) { - value, err := dec.DecodeUint64() + num, err := dec.DecodeTagNumber() if err != nil { return nil, err } + switch num { + case arrayTypeTagNum: + value, err := dec.DecodeInt64() + if err != nil { + return nil, err + } + + return arrayTypeInfo{value: int(value)}, nil + + case mapTypeTagNum: + value, err := dec.DecodeInt64() + if err != nil { + return nil, err + } - return testTypeInfo{value: value}, nil + return mapTypeInfo{value: int(value)}, nil + + case compositeTypeTagNum: + count, err := dec.DecodeArrayHead() + if err != nil { + return nil, err + } + if count != 2 { + return nil, fmt.Errorf( + "failed to decode composite type info: expect 2 elemets, got %d elements", + count, + ) + } + + startIndex, err := dec.DecodeInt64() + if err != nil { + return nil, err + } + + endIndex, err := dec.DecodeInt64() + if err != nil { + return nil, err + } + + if endIndex < startIndex { + return nil, fmt.Errorf( + "failed to decode composite type info: endIndex %d < startIndex %d", + endIndex, + startIndex, + ) + } + + if endIndex > int64(len(compositeFieldNames)) { + return nil, fmt.Errorf( + "failed to decode composite type info: endIndex %d > len(compositeFieldNames) %d", + endIndex, + len(compositeFieldNames)) + } + + return compositeTypeInfo{fieldStartIndex: int(startIndex), fieldEndIndex: int(endIndex)}, nil + + default: + return nil, fmt.Errorf("failed to decode type info with tag number %d", num) + } } diff --git a/cmd/stress/utils.go b/cmd/stress/utils.go index af65438..fd7abe0 100644 --- a/cmd/stress/utils.go +++ b/cmd/stress/utils.go @@ -19,11 +19,16 @@ package main import ( + "bytes" "fmt" + "math" "math/rand" "reflect" + "sync" "time" + "github.com/fxamacker/cbor/v2" + 
"github.com/onflow/atree" ) @@ -40,9 +45,14 @@ const ( uint64Type smallStringType largeStringType - arrayType + maxSimpleValueType +) + +const ( + arrayType int = iota mapType - maxValueType + compositeType + maxContainerValueType ) var ( @@ -68,136 +78,84 @@ func randStr(n int) string { return string(runes) } -func generateValue(storage *atree.PersistentSlabStorage, address atree.Address, valueType int, nestedLevels int) (atree.Value, error) { +func generateSimpleValue( + valueType int, +) (expected atree.Value, actual atree.Value, err error) { switch valueType { case uint8Type: - return Uint8Value(r.Intn(255)), nil + v := Uint8Value(r.Intn(math.MaxUint8)) // 255 + return v, v, nil + case uint16Type: - return Uint16Value(r.Intn(6535)), nil + v := Uint16Value(r.Intn(math.MaxUint16)) // 65535 + return v, v, nil + case uint32Type: - return Uint32Value(r.Intn(4294967295)), nil + v := Uint32Value(r.Intn(math.MaxUint32)) // 4294967295 + return v, v, nil + case uint64Type: - return Uint64Value(r.Intn(1844674407370955161)), nil + v := Uint64Value(r.Intn(math.MaxInt)) // 9_223_372_036_854_775_807 + return v, v, nil + case smallStringType: slen := r.Intn(125) - return NewStringValue(randStr(slen)), nil + v := NewStringValue(randStr(slen)) + return v, v, nil + case largeStringType: - slen := r.Intn(125) + 1024 - return NewStringValue(randStr(slen)), nil + slen := r.Intn(125) + 1024/2 + v := NewStringValue(randStr(slen)) + return v, v, nil + + default: + return nil, nil, fmt.Errorf("unexpected randome simple value type %d", valueType) + } +} + +func generateContainerValue( + valueType int, + storage atree.SlabStorage, + address atree.Address, + nestedLevels int, +) (expected atree.Value, actual atree.Value, err error) { + switch valueType { case arrayType: length := r.Intn(maxNestedArraySize) return newArray(storage, address, length, nestedLevels) + case mapType: length := r.Intn(maxNestedMapSize) return newMap(storage, address, length, nestedLevels) - default: - return 
Uint8Value(r.Intn(255)), nil - } -} -func randomKey() (atree.Value, error) { - t := r.Intn(largeStringType + 1) - return generateValue(nil, atree.Address{}, t, 0) -} - -func randomValue(storage *atree.PersistentSlabStorage, address atree.Address, nestedLevels int) (atree.Value, error) { - var t int - if nestedLevels <= 0 { - t = r.Intn(largeStringType + 1) - } else { - t = r.Intn(maxValueType) - } - return generateValue(storage, address, t, nestedLevels) -} + case compositeType: + return newComposite(storage, address, nestedLevels) -func copyValue(storage *atree.PersistentSlabStorage, address atree.Address, value atree.Value) (atree.Value, error) { - switch v := value.(type) { - case Uint8Value: - return Uint8Value(uint8(v)), nil - case Uint16Value: - return Uint16Value(uint16(v)), nil - case Uint32Value: - return Uint32Value(uint32(v)), nil - case Uint64Value: - return Uint64Value(uint64(v)), nil - case StringValue: - return NewStringValue(v.str), nil - case *atree.Array: - return copyArray(storage, address, v) - case *atree.OrderedMap: - return copyMap(storage, address, v) default: - return nil, fmt.Errorf("failed to copy value: value type %T isn't supported", v) + return nil, nil, fmt.Errorf("unexpected randome container value type %d", valueType) } } -func copyArray(storage *atree.PersistentSlabStorage, address atree.Address, array *atree.Array) (*atree.Array, error) { - iterator, err := array.Iterator() - if err != nil { - return nil, err - } - return atree.NewArrayFromBatchData(storage, address, array.Type(), func() (atree.Value, error) { - v, err := iterator.Next() - if err != nil { - return nil, err - } - if v == nil { - return nil, nil - } - return copyValue(storage, address, v) - }) +func randomKey() (atree.Value, atree.Value, error) { + t := r.Intn(maxSimpleValueType) + return generateSimpleValue(t) } -func copyMap(storage *atree.PersistentSlabStorage, address atree.Address, m *atree.OrderedMap) (*atree.OrderedMap, error) { - iterator, err := 
m.Iterator() - if err != nil { - return nil, err +func randomValue( + storage atree.SlabStorage, + address atree.Address, + nestedLevels int, +) (expected atree.Value, actual atree.Value, err error) { + if nestedLevels <= 0 { + t := r.Intn(maxSimpleValueType) + return generateSimpleValue(t) } - return atree.NewMapFromBatchData( - storage, - address, - atree.NewDefaultDigesterBuilder(), - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (atree.Value, atree.Value, error) { - k, v, err := iterator.Next() - if err != nil { - return nil, nil, err - } - if k == nil { - return nil, nil, nil - } - copiedKey, err := copyValue(storage, address, k) - if err != nil { - return nil, nil, err - } - copiedValue, err := copyValue(storage, address, v) - if err != nil { - return nil, nil, err - } - return copiedKey, copiedValue, nil - }) -} -func removeValue(storage *atree.PersistentSlabStorage, value atree.Value) error { - switch v := value.(type) { - case *atree.Array: - return removeStorable(storage, atree.SlabIDStorable(v.SlabID())) - case *atree.OrderedMap: - return removeStorable(storage, atree.SlabIDStorable(v.SlabID())) - } - return nil + t := r.Intn(maxContainerValueType) + return generateContainerValue(t, storage, address, nestedLevels) } -func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storable) error { - sid, ok := storable.(atree.SlabIDStorable) - if !ok { - return nil - } - - id := atree.SlabID(sid) +func removeStorable(storage atree.SlabStorage, storable atree.Storable) error { value, err := storable.StoredValue(storage) if err != nil { @@ -205,8 +163,6 @@ func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storabl } switch v := value.(type) { - case StringValue: - return storage.Remove(id) case *atree.Array: err := v.PopIterate(func(storable atree.Storable) { _ = removeStorable(storage, storable) @@ -214,7 +170,6 @@ func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storabl if err != nil 
{ return err } - return storage.Remove(id) case *atree.OrderedMap: err := v.PopIterate(func(keyStorable atree.Storable, valueStorable atree.Storable) { @@ -224,237 +179,262 @@ func removeStorable(storage *atree.PersistentSlabStorage, storable atree.Storabl if err != nil { return err } - return storage.Remove(id) + } - default: - return fmt.Errorf("failed to remove storable: storable type %T isn't supported", v) + if sid, ok := storable.(atree.SlabIDStorable); ok { + return storage.Remove(atree.SlabID(sid)) } + + return nil } -func valueEqual(a atree.Value, b atree.Value) error { - switch a.(type) { +func valueEqual(expected atree.Value, actual atree.Value) error { + switch expected := expected.(type) { + case arrayValue: + actual, ok := actual.(*atree.Array) + if !ok { + return fmt.Errorf("failed to convert actual value to *Array, got %T", actual) + } + + return arrayEqual(expected, actual) + case *atree.Array: - return arrayEqual(a, b) + return fmt.Errorf("expected value shouldn't be *Array") + + case mapValue: + actual, ok := actual.(*atree.OrderedMap) + if !ok { + return fmt.Errorf("failed to convert actual value to *OrderedMap, got %T", actual) + } + + return mapEqual(expected, actual) + case *atree.OrderedMap: - return mapEqual(a, b) + return fmt.Errorf("expected value shouldn't be *OrderedMap") + default: - if !reflect.DeepEqual(a, b) { - return fmt.Errorf("value %s (%T) != value %s (%T)", a, a, b, b) + if !reflect.DeepEqual(expected, actual) { + return fmt.Errorf("expected value %v (%T) != actual value %v (%T)", expected, expected, actual, actual) } } + return nil } -func arrayEqual(a atree.Value, b atree.Value) error { - array1, ok := a.(*atree.Array) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.Array", a, a) - } - - array2, ok := b.(*atree.Array) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.Array", b, b) - } - - if array1.Count() != array2.Count() { - return fmt.Errorf("array %s count %d != array %s count %d", 
array1, array1.Count(), array2, array2.Count()) +func arrayEqual(expected arrayValue, actual *atree.Array) error { + if uint64(len(expected)) != actual.Count() { + return fmt.Errorf("array count %d != expected count %d", actual.Count(), len(expected)) } - iterator1, err := array1.Iterator() + iterator, err := actual.ReadOnlyIterator() if err != nil { - return fmt.Errorf("failed to get array1 iterator: %w", err) - } - - iterator2, err := array2.Iterator() - if err != nil { - return fmt.Errorf("failed to get array2 iterator: %w", err) + return fmt.Errorf("failed to get array iterator: %w", err) } + i := 0 for { - value1, err := iterator1.Next() + actualValue, err := iterator.Next() if err != nil { - return fmt.Errorf("iterator1.Next() error: %w", err) + return fmt.Errorf("iterator.Next() error: %w", err) } - value2, err := iterator2.Next() - if err != nil { - return fmt.Errorf("iterator2.Next() error: %w", err) + if actualValue == nil { + break } - err = valueEqual(value1, value2) - if err != nil { - return fmt.Errorf("array elements are different: %w", err) + if i >= len(expected) { + return fmt.Errorf("more elements from array iterator than expected") } - if value1 == nil || value2 == nil { - break + err = valueEqual(expected[i], actualValue) + if err != nil { + return fmt.Errorf("array elements are different: %w", err) } - } - - return nil -} -func mapEqual(a atree.Value, b atree.Value) error { - m1, ok := a.(*atree.OrderedMap) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.OrderedMap", a, a) + i++ } - m2, ok := b.(*atree.OrderedMap) - if !ok { - return fmt.Errorf("value %s type is %T, want *atree.OrderedMap", b, b) + if i != len(expected) { + return fmt.Errorf("got %d iterated array elements, expect %d values", i, len(expected)) } - if m1.Count() != m2.Count() { - return fmt.Errorf("map %s count %d != map %s count %d", m1, m1.Count(), m2, m2.Count()) - } + return nil +} - iterator1, err := m1.Iterator() - if err != nil { - return 
fmt.Errorf("failed to get m1 iterator: %w", err) +func mapEqual(expected mapValue, actual *atree.OrderedMap) error { + if uint64(len(expected)) != actual.Count() { + return fmt.Errorf("map count %d != expected count %d", actual.Count(), len(expected)) } - iterator2, err := m2.Iterator() + iterator, err := actual.ReadOnlyIterator() if err != nil { - return fmt.Errorf("failed to get m2 iterator: %w", err) + return fmt.Errorf("failed to get map iterator: %w", err) } + i := 0 for { - key1, value1, err := iterator1.Next() + actualKey, actualValue, err := iterator.Next() if err != nil { - return fmt.Errorf("iterator1.Next() error: %w", err) + return fmt.Errorf("iterator.Next() error: %w", err) } - key2, value2, err := iterator2.Next() - if err != nil { - return fmt.Errorf("iterator2.Next() error: %w", err) + if actualKey == nil { + break } - err = valueEqual(key1, key2) - if err != nil { - return fmt.Errorf("map keys are different: %w", err) + expectedValue, exist := expected[actualKey] + if !exist { + return fmt.Errorf("failed to find key %v in expected values", actualKey) } - err = valueEqual(value1, value2) + err = valueEqual(expectedValue, actualValue) if err != nil { return fmt.Errorf("map values are different: %w", err) } - if key1 == nil || key2 == nil { - break - } + i++ + } + + if i != len(expected) { + return fmt.Errorf("got %d iterated map elements, expect %d values", i, len(expected)) } return nil } // newArray creates atree.Array with random elements of specified size and nested level -func newArray(storage *atree.PersistentSlabStorage, address atree.Address, length int, nestedLevel int) (*atree.Array, error) { - typeInfo := testTypeInfo{value: 123} +func newArray( + storage atree.SlabStorage, + address atree.Address, + length int, + nestedLevel int, +) (arrayValue, *atree.Array, error) { + + typeInfo := newArrayTypeInfo() array, err := atree.NewArray(storage, address, typeInfo) if err != nil { - return nil, fmt.Errorf("failed to create new array: %w", err) 
+ return nil, nil, fmt.Errorf("failed to create new array: %w", err) } - values := make([]atree.Value, length) + expectedValues := make(arrayValue, length) for i := 0; i < length; i++ { - value, err := randomValue(storage, address, nestedLevel-1) + expectedValue, value, err := randomValue(storage, address, nestedLevel-1) if err != nil { - return nil, err + return nil, nil, err } - copedValue, err := copyValue(storage, atree.Address{}, value) - if err != nil { - return nil, err - } - values[i] = copedValue + + expectedValues[i] = expectedValue + err = array.Append(value) if err != nil { - return nil, err + return nil, nil, err } } - err = checkArrayDataLoss(array, values) + err = checkArrayDataLoss(expectedValues, array) if err != nil { - return nil, err + return nil, nil, err } - for _, v := range values { - err := removeValue(storage, v) - if err != nil { - return nil, err - } - } - - return array, nil + return expectedValues, array, nil } // newMap creates atree.OrderedMap with random elements of specified size and nested level -func newMap(storage *atree.PersistentSlabStorage, address atree.Address, length int, nestedLevel int) (*atree.OrderedMap, error) { - typeInfo := testTypeInfo{value: 123} +func newMap( + storage atree.SlabStorage, + address atree.Address, + length int, + nestedLevel int, +) (mapValue, *atree.OrderedMap, error) { + + typeInfo := newMapTypeInfo() m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), typeInfo) if err != nil { - return nil, fmt.Errorf("failed to create new map: %w", err) + return nil, nil, fmt.Errorf("failed to create new map: %w", err) } - elements := make(map[atree.Value]atree.Value, length) - - for i := 0; i < length; i++ { - k, err := randomKey() - if err != nil { - return nil, err - } + expectedValues := make(mapValue, length) - copiedKey, err := copyValue(storage, atree.Address{}, k) + for m.Count() < uint64(length) { + expectedKey, key, err := randomKey() if err != nil { - return nil, err + return 
nil, nil, err } - v, err := randomValue(storage, address, nestedLevel-1) + expectedValue, value, err := randomValue(storage, address, nestedLevel-1) if err != nil { - return nil, err + return nil, nil, err } - copiedValue, err := copyValue(storage, atree.Address{}, v) - if err != nil { - return nil, err - } + expectedValues[expectedKey] = expectedValue - elements[copiedKey] = copiedValue - - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + existingStorable, err := m.Set(compare, hashInputProvider, key, value) if err != nil { - return nil, err + return nil, nil, err } if existingStorable != nil { // Delete overwritten element err = removeStorable(storage, existingStorable) if err != nil { - return nil, fmt.Errorf("failed to remove storable element %s: %w", existingStorable, err) + return nil, nil, fmt.Errorf("failed to remove storable element %s: %w", existingStorable, err) } } } - err = checkMapDataLoss(m, elements) + err = checkMapDataLoss(expectedValues, m) + if err != nil { + return nil, nil, err + } + + return expectedValues, m, nil +} + +// newComposite creates atree.OrderedMap with elements of random composite type and nested level +func newComposite( + storage atree.SlabStorage, + address atree.Address, + nestedLevel int, +) (mapValue, *atree.OrderedMap, error) { + + compositeType := newCompositeTypeInfo() + + m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), compositeType) if err != nil { - return nil, err + return nil, nil, fmt.Errorf("failed to create new map: %w", err) } - for k, v := range elements { - err := removeValue(storage, k) + expectedValues := make(mapValue) + + for _, name := range compositeType.getFieldNames() { + + expectedKey, key := NewStringValue(name), NewStringValue(name) + + expectedValue, value, err := randomValue(storage, address, nestedLevel-1) if err != nil { - return nil, err + return nil, nil, err } - err = removeValue(storage, v) + + expectedValues[expectedKey] = expectedValue + + 
existingStorable, err := m.Set(compare, hashInputProvider, key, value) if err != nil { - return nil, err + return nil, nil, err + } + if existingStorable != nil { + return nil, nil, fmt.Errorf("failed to create new map of composite type: found duplicate field name %s", name) } } - return m, nil + err = checkMapDataLoss(expectedValues, m) + if err != nil { + return nil, nil, err + } + + return expectedValues, m, nil } type InMemBaseStorage struct { @@ -542,3 +522,62 @@ func (s *InMemBaseStorage) SegmentsTouched() int { func (s *InMemBaseStorage) ResetReporter() { // not needed } + +// arrayValue is an atree.Value that represents an array of atree.Value. +// It's used to test elements of atree.Array. +type arrayValue []atree.Value + +var _ atree.Value = &arrayValue{} + +func (v arrayValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) { + panic("not reachable") +} + +// mapValue is an atree.Value that represents a map of atree.Value. +// It's used to test elements of atree.OrderedMap. 
+type mapValue map[atree.Value]atree.Value + +var _ atree.Value = &mapValue{} + +func (v mapValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Storable, error) { + panic("not reachable") +} + +var typeInfoComparator = func(a atree.TypeInfo, b atree.TypeInfo) bool { + aID, _ := getEncodedTypeInfo(a) + bID, _ := getEncodedTypeInfo(b) + return aID == bID +} + +func getEncodedTypeInfo(ti atree.TypeInfo) (string, error) { + b := getTypeIDBuffer() + defer putTypeIDBuffer(b) + + enc := cbor.NewStreamEncoder(b) + err := ti.Encode(enc) + if err != nil { + return "", err + } + enc.Flush() + + return b.String(), nil +} + +const defaultTypeIDBufferSize = 256 + +var typeIDBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(defaultTypeIDBufferSize) + return e + }, +} + +func getTypeIDBuffer() *bytes.Buffer { + return typeIDBufferPool.Get().(*bytes.Buffer) +} + +func putTypeIDBuffer(e *bytes.Buffer) { + e.Reset() + typeIDBufferPool.Put(e) +} diff --git a/encode.go b/encode.go index 20e51d4..a416d12 100644 --- a/encode.go +++ b/encode.go @@ -28,21 +28,39 @@ import ( // Encoder writes atree slabs to io.Writer. 
type Encoder struct { io.Writer - CBOR *cbor.StreamEncoder - Scratch [64]byte + CBOR *cbor.StreamEncoder + Scratch [64]byte + encMode cbor.EncMode + _inlinedExtraData *InlinedExtraData } func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder { streamEncoder := encMode.NewStreamEncoder(w) return &Encoder{ - Writer: w, - CBOR: streamEncoder, + Writer: w, + CBOR: streamEncoder, + encMode: encMode, } } +func (enc *Encoder) inlinedExtraData() *InlinedExtraData { + if enc._inlinedExtraData == nil { + enc._inlinedExtraData = newInlinedExtraData() + } + return enc._inlinedExtraData +} + +func (enc *Encoder) hasInlinedExtraData() bool { + if enc._inlinedExtraData == nil { + return false + } + return !enc._inlinedExtraData.empty() +} + type StorableDecoder func( decoder *cbor.StreamDecoder, storableSlabID SlabID, + inlinedExtraData []ExtraData, ) ( Storable, error, @@ -101,7 +119,7 @@ func DecodeSlab( case slabStorable: cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) - storable, err := decodeStorable(cborDec, id) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode slab storable") @@ -116,7 +134,6 @@ func DecodeSlab( } } -// TODO: make it inline func GetUintCBORSize(n uint64) uint32 { if n <= 23 { return 1 diff --git a/errors.go b/errors.go index f493188..6059786 100644 --- a/errors.go +++ b/errors.go @@ -456,6 +456,24 @@ func (e *MapElementCountError) Error() string { return e.msg } +// ReadOnlyIteratorElementMutationError is the error returned when readonly iterator element is mutated. +type ReadOnlyIteratorElementMutationError struct { + containerValueID ValueID + elementValueID ValueID +} + +// NewReadOnlyIteratorElementMutationError creates ReadOnlyIteratorElementMutationError. 
+func NewReadOnlyIteratorElementMutationError(containerValueID, elementValueID ValueID) error { + return NewFatalError(&ReadOnlyIteratorElementMutationError{ + containerValueID: containerValueID, + elementValueID: elementValueID, + }) +} + +func (e *ReadOnlyIteratorElementMutationError) Error() string { + return fmt.Sprintf("element (%s) cannot be mutated because it is from readonly iterator of container (%s)", e.elementValueID, e.containerValueID) +} + func wrapErrorAsExternalErrorIfNeeded(err error) error { return wrapErrorfAsExternalErrorIfNeeded(err, "") } diff --git a/map.go b/map.go index 3f4f7fb..51493c3 100644 --- a/map.go +++ b/map.go @@ -29,6 +29,8 @@ import ( "github.com/fxamacker/circlehash" ) +// NOTE: we use encoding size (in bytes) instead of Go type size for slab operations, +// such as merge and split, so size constants here are related to encoding size. const ( digestSize = 8 @@ -83,6 +85,18 @@ const ( // CircleHash64fx and SipHash might use this const as part of their // 128-bit seed (when they don't use 64-bit -> 128-bit seed expansion func). 
typicalRandomConstant = uint64(0x1BD11BDAA9FC1A22) // DO NOT MODIFY + + // inlined map data slab prefix size: + // tag number (2 bytes) + + // 3-element array head (1 byte) + + // extra data ref index (2 bytes) [0, 255] + + // value index head (1 byte) + + // value index (8 bytes) + inlinedMapDataSlabPrefixSize = inlinedTagNumSize + + inlinedCBORArrayHeadSize + + inlinedExtraDataIndexSize + + inlinedCBORValueIDHeadSize + + inlinedValueIDSize ) // MaxCollisionLimitPerDigest is the noncryptographic hash collision limit @@ -100,6 +114,15 @@ type MapValue Storable type element interface { fmt.Stringer + getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, MapKey, error) + Get( storage SlabStorage, digester Digester, @@ -107,7 +130,7 @@ type element interface { hkey Digest, comparator ValueComparator, key Value, - ) (MapValue, error) + ) (MapKey, MapValue, error) // Set returns updated element, which may be a different type of element because of hash collision. Set( @@ -121,7 +144,7 @@ type element interface { hip HashInputProvider, key Value, value Value, - ) (newElem element, existingValue MapValue, err error) + ) (newElem element, keyStorable MapKey, existingMapValueStorable MapValue, err error) // Remove returns matched key, value, and updated element. // Updated element may be nil, modified, or a different type of element. 
@@ -159,9 +182,45 @@ type elementGroup interface { type elements interface { fmt.Stringer - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) - Set(storage SlabStorage, address Address, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) - Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) + getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, MapKey, error) + + Get( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) + + Set( + storage SlabStorage, + address Address, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, + ) (MapKey, MapValue, error) + + Remove( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) Merge(elements) error Split() (elements, elements, error) @@ -239,6 +298,8 @@ type MapExtraData struct { Seed uint64 } +var _ ExtraData = &MapExtraData{} + // MapDataSlab is leaf node, implementing MapSlab. // anySize is true for data slab that isn't restricted by size requirement. type MapDataSlab struct { @@ -253,9 +314,11 @@ type MapDataSlab struct { anySize bool collisionGroup bool + inlined bool } var _ MapSlab = &MapDataSlab{} +var _ ContainerStorable = &MapDataSlab{} // MapMetaDataSlab is internal node, implementing MapSlab. 
type MapMetaDataSlab struct { @@ -272,9 +335,44 @@ var _ MapSlab = &MapMetaDataSlab{} type MapSlab interface { Slab - Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) - Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (existingValue MapValue, err error) - Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) + getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, MapKey, error) + + Get( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) + + Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, + ) (MapKey, MapValue, error) + + Remove( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, + ) (MapKey, MapValue, error) IsData() bool @@ -292,15 +390,42 @@ type MapSlab interface { SetExtraData(*MapExtraData) PopIterate(SlabStorage, MapPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool + Inline(SlabStorage) error + Uninline(SlabStorage) error } +// OrderedMap is an ordered map of key-value pairs; keys can be any hashable type +// and values can be any serializable value type. It supports heterogeneous key +// or value types (e.g. first key storing a boolean and second key storing a string). +// OrderedMap keeps values in specific sorted order and operations are deterministic +// so the state of the segments after a sequence of operations are always unique. 
+// +// OrderedMap key-value pairs can be stored in one or more relatively fixed-sized segments. +// +// OrderedMap can be inlined into its parent container when the entire content fits in +// parent container's element size limit. Specifically, OrderedMap with one segment +// which fits in size limit can be inlined, while OrderedMap with multiple segments +// can't be inlined. type OrderedMap struct { Storage SlabStorage root MapSlab digesterBuilder DigesterBuilder + + // parentUpdater is a callback that notifies parent container when this map is modified. + // If this callback is nil, this map has no parent. Otherwise, this map has parent + // and this callback must be used when this map is changed by Set and Remove. + // + // parentUpdater acts like "parent pointer". It is not stored physically and is only in memory. + // It is setup when child map is returned from parent's Get. It is also setup when + // new child is added to parent through Set or Insert. + parentUpdater parentUpdater } var _ Value = &OrderedMap{} +var _ mutableValueNotifier = &OrderedMap{} const mapExtraDataLength = 3 @@ -365,17 +490,25 @@ func newMapExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) (* }, nil } +func (m *MapExtraData) isExtraData() bool { + return true +} + +func (m *MapExtraData) Type() TypeInfo { + return m.TypeInfo +} + // Encode encodes extra data as CBOR array: // // [type info, count, seed] -func (m *MapExtraData) Encode(enc *Encoder) error { +func (m *MapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { err := enc.CBOR.EncodeArrayHead(mapExtraDataLength) if err != nil { return NewEncodingError(err) } - err = m.TypeInfo.Encode(enc.CBOR) + err = encodeTypeInfo(enc, m.TypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by TypeInfo interface. 
return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info") @@ -399,7 +532,7 @@ func (m *MapExtraData) Encode(enc *Encoder) error { return nil } -func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (element, error) { +func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (element, error) { nt, err := cborDec.NextType() if err != nil { return nil, NewDecodingError(err) @@ -408,7 +541,7 @@ func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDeco switch nt { case cbor.ArrayType: // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). - return newSingleElementFromData(cborDec, decodeStorable) + return newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) case cbor.TagType: tagNum, err := cborDec.DecodeTagNumber() @@ -418,10 +551,10 @@ func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDeco switch tagNum { case CBORTagInlineCollisionGroup: // Don't need to wrap error as external error because err is already categorized by newInlineCollisionGroupFromData(). - return newInlineCollisionGroupFromData(cborDec, decodeStorable) + return newInlineCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) case CBORTagExternalCollisionGroup: // Don't need to wrap error as external error because err is already categorized by newExternalCollisionGroupFromData(). 
- return newExternalCollisionGroupFromData(cborDec, decodeStorable) + return newExternalCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) default: return nil, NewDecodingError(fmt.Errorf("failed to decode element: unrecognized tag number %d", tagNum)) } @@ -452,7 +585,7 @@ func newSingleElement(storage SlabStorage, address Address, key Value, value Val }, nil } -func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*singleElement, error) { +func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*singleElement, error) { elemCount, err := cborDec.DecodeArrayHead() if err != nil { return nil, NewDecodingError(err) @@ -462,13 +595,13 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab return nil, NewDecodingError(fmt.Errorf("failed to decode single element: expect array of 2 elements, got %d elements", elemCount)) } - key, err := decodeStorable(cborDec, SlabIDUndefined) + key, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") } - value, err := decodeStorable(cborDec, SlabIDUndefined) + value, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode value's storable") @@ -496,14 +629,14 @@ func (e *singleElement) Encode(enc *Encoder) error { err = e.key.Encode(enc) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map key") + return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map key storable") } // Encode value err = e.value.Encode(enc) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. - return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") + return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value storable") } err = enc.CBOR.Flush() @@ -514,16 +647,30 @@ func (e *singleElement) Encode(enc *Encoder) error { return nil } -func (e *singleElement) Get(storage SlabStorage, _ Digester, _ uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *singleElement) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { + k, v, err := e.Get(storage, digester, level, hkey, comparator, key) + + nextKey := MapKey(nil) + return k, v, nextKey, err +} + +func (e *singleElement) Get(storage SlabStorage, _ Digester, _ uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { equal, err := comparator(storage, key, e.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - return e.value, nil + return e.key, e.value, nil } - return nil, NewKeyNotFoundError(key) + return nil, nil, NewKeyNotFoundError(key) } // Set updates value if key matches, otherwise returns inlineCollisionGroup with existing and new elements. 
@@ -542,27 +689,27 @@ func (e *singleElement) Set( hip HashInputProvider, key Value, value Value, -) (element, MapValue, error) { +) (element, MapKey, MapValue, error) { equal, err := comparator(storage, key, e.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } // Key matches, overwrite existing value if equal { - existingValue := e.value + existingMapValueStorable := e.value valueStorable, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(e.key.ByteSize()))) if err != nil { // Wrap err as external error (if needed) because err is returned by Value interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") } e.value = valueStorable e.size = singleElementPrefixSize + e.key.ByteSize() + e.value.ByteSize() - return e, existingValue, nil + return e, e.key, existingMapValueStorable, nil } // Hash collision detected @@ -586,20 +733,20 @@ func (e *singleElement) Set( kv, err := e.key.StoredValue(storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's stored value") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's stored value") } existingKeyDigest, err := b.Digest(hip, kv) if err != nil { // Wrap err as external error (if needed) because err is returned by DigestBuilder interface. 
- return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's digester") + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get key's digester") } defer putDigester(existingKeyDigest) d, err := existingKeyDigest.Digest(level + 1) if err != nil { // Wrap err as external error (if needed) because err is returned by Digester interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get key's digest at level %d", level+1)) + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get key's digest at level %d", level+1)) } group := &inlineCollisionGroup{ @@ -648,8 +795,8 @@ func (e *singleElement) String() string { return fmt.Sprintf("%s:%s", e.key, e.value) } -func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*inlineCollisionGroup, error) { - elements, err := newElementsFromData(cborDec, decodeStorable) +func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*inlineCollisionGroup, error) { + elements, err := newElementsFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromData(). return nil, err @@ -686,12 +833,33 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder) error { return nil } -func (e *inlineCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *inlineCollisionGroup) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { + + // Adjust level and hkey for collision group. 
+ level++ + if level > digester.Levels() { + return nil, nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) + } + hkey, _ := digester.Digest(level) + + // Search key in collision group with adjusted hkeyPrefix and hkey. + // Don't need to wrap error as external error because err is already categorized by elements.Get(). + return e.elements.getElementAndNextKey(storage, digester, level, hkey, comparator, key) +} + +func (e *inlineCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) @@ -711,19 +879,19 @@ func (e *inlineCollisionGroup) Set( hip HashInputProvider, key Value, value Value, -) (element, MapValue, error) { +) (element, MapKey, MapValue, error) { // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, nil, NewHashLevelErrorf("inline collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - existingValue, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := e.elements.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). 
- return nil, nil, err + return nil, nil, nil, err } if level == 1 { @@ -734,7 +902,7 @@ func (e *inlineCollisionGroup) Set( id, err := storage.GenerateSlabID(address) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded( + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded( err, fmt.Sprintf("failed to generate slab ID for address 0x%x", address)) } @@ -751,21 +919,20 @@ func (e *inlineCollisionGroup) Set( collisionGroup: true, } - err = storage.Store(id, slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + return nil, nil, nil, err } // Create and return externalCollisionGroup (wrapper of newly created MapDataSlab) return &externalCollisionGroup{ slabID: id, size: externalCollisionGroupPrefixSize + SlabIDStorable(id).ByteSize(), - }, existingValue, nil + }, keyStorable, existingMapValueStorable, nil } } - return e, existingValue, nil + return e, keyStorable, existingMapValueStorable, nil } // Remove returns key, value, and updated element if key is found. @@ -829,9 +996,9 @@ func (e *inlineCollisionGroup) String() string { return "inline[" + e.elements.String() + "]" } -func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*externalCollisionGroup, error) { +func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*externalCollisionGroup, error) { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode Storable") @@ -875,17 +1042,43 @@ func (e *externalCollisionGroup) Encode(enc *Encoder) error { return nil } -func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *externalCollisionGroup) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { slab, err := getMapSlab(storage, e.slabID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, err + return nil, nil, nil, err + } + + // Adjust level and hkey for collision group. + level++ + if level > digester.Levels() { + return nil, nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) + } + hkey, _ := digester.Digest(level) + + // Search key in collision group with adjusted hkeyPrefix and hkey. + // Don't need to wrap error as external error because err is already categorized by MapSlab.getElementAndNextKey(). + return slab.getElementAndNextKey(storage, digester, level, hkey, comparator, key) +} + +func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { + slab, err := getMapSlab(storage, e.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). 
+ return nil, nil, err } // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) @@ -894,26 +1087,37 @@ func (e *externalCollisionGroup) Get(storage SlabStorage, digester Digester, lev return slab.Get(storage, digester, level, hkey, comparator, key) } -func (e *externalCollisionGroup) Set(storage SlabStorage, _ Address, b DigesterBuilder, digester Digester, level uint, _ Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (element, MapValue, error) { +func (e *externalCollisionGroup) Set( + storage SlabStorage, + _ Address, + b DigesterBuilder, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (element, MapKey, MapValue, error) { slab, err := getMapSlab(storage, e.slabID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, nil, err + return nil, nil, nil, err } // Adjust level and hkey for collision group level++ if level > digester.Levels() { - return nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) + return nil, nil, nil, NewHashLevelErrorf("external collision group digest level is %d, want <= %d", level, digester.Levels()) } hkey, _ := digester.Digest(level) - existingValue, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := slab.Set(storage, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). 
- return nil, nil, err + return nil, nil, nil, err } - return e, existingValue, nil + return e, keyStorable, existingMapValueStorable, nil } // Remove returns key, value, and updated element if key is found. @@ -1029,7 +1233,7 @@ func (e *externalCollisionGroup) String() string { return fmt.Sprintf("external(%s)", e.slabID) } -func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (elements, error) { +func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (elements, error) { arrayCount, err := cborDec.DecodeArrayHead() if err != nil { @@ -1076,7 +1280,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(singleElementsPrefixSize) elems := make([]*singleElement, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newSingleElementFromData(cborDec, decodeStorable) + elem, err := newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). return nil, err @@ -1102,7 +1306,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(hkeyElementsPrefixSize) elems := make([]element, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newElementFromData(cborDec, decodeStorable) + elem, err := newElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementFromData(). 
return nil, err @@ -1216,10 +1420,15 @@ func (e *hkeyElements) Encode(enc *Encoder) error { return nil } -func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *hkeyElements) getElement( + digester Digester, + level uint, + hkey Digest, + key Value, +) (element, int, error) { if level >= digester.Levels() { - return nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, 0, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } // binary search by hkey @@ -1241,20 +1450,89 @@ func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, h // No matching hkey if equalIndex == -1 { - return nil, NewKeyNotFoundError(key) + return nil, 0, NewKeyNotFoundError(key) } - elem := e.elems[equalIndex] + return e.elems[equalIndex], equalIndex, nil +} + +func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { + elem, _, err := e.getElement(digester, level, hkey, key) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by hkeyElements.getElement(). + return nil, nil, err + } // Don't need to wrap error as external error because err is already categorized by element.Get(). 
 return elem.Get(storage, digester, level, hkey, comparator, key)
 }
 
-func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) {
+func (e *hkeyElements) getElementAndNextKey(
+ storage SlabStorage,
+ digester Digester,
+ level uint,
+ hkey Digest,
+ comparator ValueComparator,
+ key Value,
+) (MapKey, MapValue, MapKey, error) {
+ elem, index, err := e.getElement(digester, level, hkey, key)
+ if err != nil {
+ // Don't need to wrap error as external error because err is already categorized by hkeyElements.getElement().
+ return nil, nil, nil, err
+ }
+
+ k, v, nk, err := elem.getElementAndNextKey(storage, digester, level, hkey, comparator, key)
+ if err != nil {
+ // Don't need to wrap error as external error because err is already categorized by element.getElementAndNextKey().
+ return nil, nil, nil, err
+ }
+
+ if nk != nil {
+ // Found next key in element group.
+ return k, v, nk, nil
+ }
+
+ nextIndex := index + 1
+
+ switch {
+ case nextIndex < len(e.elems):
+ // Next element is still in the same hkeyElements group.
+ nextElement := e.elems[nextIndex]
+
+ nextKey, err := firstKeyInElement(storage, nextElement)
+ if err != nil {
+ // Don't need to wrap error as external error because err is already categorized by firstKeyInElement().
+ return nil, nil, nil, err
+ }
+
+ return k, v, nextKey, nil
+
+ case nextIndex == len(e.elems):
+ // Next element is outside this hkeyElements group, so nextKey is nil.
+ return k, v, nil, nil
+
+ default: // nextIndex > len(e.elems)
+ // This should never happen. 
+ return nil, nil, nil, NewUnreachableError() + } +} + +func (e *hkeyElements) Set( + storage SlabStorage, + address Address, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { // Check hkeys are not empty if level >= digester.Levels() { - return nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("hkey elements digest level is %d, want < %d", level, digester.Levels()) } if len(e.hkeys) == 0 { @@ -1263,7 +1541,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.hkeys = []Digest{hkey} @@ -1272,7 +1550,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } if hkey < e.hkeys[0] { @@ -1281,7 +1559,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). 
- return nil, err + return nil, nil, err } e.hkeys = append(e.hkeys, Digest(0)) @@ -1294,7 +1572,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } if hkey > e.hkeys[len(e.hkeys)-1] { @@ -1303,7 +1581,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.hkeys = append(e.hkeys, hkey) @@ -1312,7 +1590,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } equalIndex := -1 // first index that m.hkeys[h] == hkey @@ -1347,10 +1625,10 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild elementCount, err := elem.Count(storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Count(). - return nil, err + return nil, nil, err } if elementCount == 0 { - return nil, NewMapElementCountError("expect element count > 0, got element count == 0") + return nil, nil, NewMapElementCountError("expect element count > 0, got element count == 0") } // collisionCount is elementCount-1 because: @@ -1362,22 +1640,22 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild // Check if existing collision count reached MaxCollisionLimitPerDigest if collisionCount >= MaxCollisionLimitPerDigest { // Enforce collision limit on inserts and ignore updates. 
- _, err = elem.Get(storage, digester, level, hkey, comparator, key) + _, _, err = elem.Get(storage, digester, level, hkey, comparator, key) if err != nil { var knfe *KeyNotFoundError if errors.As(err, &knfe) { // Don't allow any more collisions for a digest that // already reached MaxCollisionLimitPerDigest. - return nil, NewCollisionLimitError(MaxCollisionLimitPerDigest) + return nil, nil, NewCollisionLimitError(MaxCollisionLimitPerDigest) } } } } - elem, existingValue, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) + elem, keyStorable, existingMapValueStorable, err := elem.Set(storage, address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Set(). - return nil, err + return nil, nil, err } e.elems[equalIndex] = elem @@ -1391,7 +1669,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild } e.size = size - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } // No matching hkey @@ -1399,7 +1677,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). 
- return nil, err + return nil, nil, err } // insert into sorted hkeys @@ -1414,7 +1692,7 @@ func (e *hkeyElements) Set(storage SlabStorage, address Address, b DigesterBuild e.size += digestSize + newElem.Size() - return nil, nil + return newElem.key, nil, nil } func (e *hkeyElements) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -1844,31 +2122,78 @@ func (e *singleElements) Encode(enc *Encoder) error { return nil } -func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) { +func (e *singleElements) get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, int, error) { if level != digester.Levels() { - return nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) + return nil, nil, 0, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) } // linear search by key - for _, elem := range e.elems { + for i, elem := range e.elems { equal, err := comparator(storage, key, elem.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, 0, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - return elem.value, nil + return elem.key, elem.value, i, nil } } - return nil, NewKeyNotFoundError(key) + return nil, nil, 0, NewKeyNotFoundError(key) +} + +func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { + k, v, _, err := e.get(storage, digester, level, hkey, comparator, key) + return k, v, err +} + +func (e *singleElements) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { + k, v, index, err := e.get(storage, digester, level, hkey, comparator, key) + if err != nil { + return nil, nil, nil, err + } + + nextIndex := index + 1 + + switch { + case nextIndex < len(e.elems): + // Next element is still in the same singleElements group. + nextKey := e.elems[nextIndex].key + return k, v, nextKey, nil + + case nextIndex == len(e.elems): + // Next element is outside this singleElements group, so nextKey is nil. + return k, v, nil, nil + + default: // nextIndex > len(e.elems) + // This should never happen. 
+ return nil, nil, nil, NewUnreachableError() + } } -func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBuilder, digester Digester, level uint, _ Digest, comparator ValueComparator, _ HashInputProvider, key Value, value Value) (MapValue, error) { +func (e *singleElements) Set( + storage SlabStorage, + address Address, + _ DigesterBuilder, + digester Digester, + level uint, + _ Digest, + comparator ValueComparator, + _ HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { if level != digester.Levels() { - return nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) + return nil, nil, NewHashLevelErrorf("single elements digest level is %d, want %d", level, digester.Levels()) } // linear search key and update value @@ -1878,16 +2203,17 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui equal, err := comparator(storage, key, elem.key) if err != nil { // Wrap err as external error (if needed) because err is returned by ValueComparator callback. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to compare keys") } if equal { - existingValue := elem.value + existingKeyStorable := elem.key + existingValueStorable := elem.value vs, err := value.Storable(storage, address, maxInlineMapValueSize(uint64(elem.key.ByteSize()))) if err != nil { // Wrap err as external error (if needed) because err is returned by Value interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get value's storable") } elem.value = vs @@ -1902,7 +2228,7 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui } e.size = size - return existingValue, nil + return existingKeyStorable, existingValueStorable, nil } } @@ -1910,12 +2236,12 @@ func (e *singleElements) Set(storage SlabStorage, address Address, _ DigesterBui newElem, err := newSingleElement(storage, address, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElement(). - return nil, err + return nil, nil, err } e.elems = append(e.elems, newElem) e.size += newElem.size - return nil, nil + return newElem.key, nil, nil } func (e *singleElements) Remove(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -2147,7 +2473,7 @@ func newMapDataSlabFromDataV0( // Decode elements cborDec := decMode.NewByteStreamDecoder(data) - elements, err := newElementsFromData(cborDec, decodeStorable) + elements, err := newElementsFromData(cborDec, decodeStorable, id, nil) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromDataV0(). 
return nil, err @@ -2177,23 +2503,18 @@ func newMapDataSlabFromDataV0( // newMapDataSlabFromDataV1 decodes data in version 1: // -// Root DataSlab Header: -// -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ -// -// Non-root DataSlab Header (18 bytes): +// DataSlab Header: // -// +-------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) | -// +-------------------------------+-----------------------------+ +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded elements // // See MapExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. // See hkeyElements.Encode() and singleElements.Encode() for elements section format. func newMapDataSlabFromDataV1( id SlabID, @@ -2208,6 +2529,7 @@ func newMapDataSlabFromDataV1( ) { var err error var extraData *MapExtraData + var inlinedExtraData []ExtraData var next SlabID // Decode extra data @@ -2219,7 +2541,21 @@ func newMapDataSlabFromDataV1( } } - // Decode next slab ID + // Decode inlined extra data + if h.hasInlinedSlabs() { + inlinedExtraData, data, err = newInlinedExtraDataFromData( + data, + decMode, + decodeStorable, + decodeTypeInfo, + ) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by newInlinedExtraDataFromData(). 
+ return nil, err + } + } + + // Decode next slab ID for non-root slab + if h.hasNextSlabID() { + if len(data) < slabIDSize { + return nil, NewDecodingErrorf("data is too short for map data slab") @@ -2236,7 +2572,7 @@ func newMapDataSlabFromDataV1( // Decode elements cborDec := decMode.NewByteStreamDecoder(data) - elements, err := newElementsFromData(cborDec, decodeStorable) + elements, err := newElementsFromData(cborDec, decodeStorable, id, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromDataV1(). return nil, err @@ -2264,28 +2600,286 @@ func newMapDataSlabFromDataV1( }, nil } -// Encode encodes this map data slab to the given encoder. +// DecodeInlinedCompactMapStorable decodes inlined compact map data. Encoding is +// version 1 with CBOR tag having tag number CBORTagInlinedCompactMap, and tag content +// as 3-element array: // -// Root DataSlab Header: +// - index of inlined extra data +// - value ID index +// - CBOR array of elements // -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ +// NOTE: This function doesn't decode tag number because tag number is decoded +// in the caller and decoder only contains tag content. 
+func DecodeInlinedCompactMapStorable( + dec *cbor.StreamDecoder, + decodeStorable StorableDecoder, + parentSlabID SlabID, + inlinedExtraData []ExtraData, +) ( + Storable, + error, +) { + const inlinedMapDataSlabArrayCount = 3 + + arrayCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, NewDecodingError(err) + } + + if arrayCount != inlinedMapDataSlabArrayCount { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined compact map data, expect array of %d elements, got %d elements", + inlinedMapDataSlabArrayCount, + arrayCount)) + } + + // element 0: extra data index + extraDataIndex, err := dec.DecodeUint64() + if err != nil { + return nil, NewDecodingError(err) + } + if extraDataIndex >= uint64(len(inlinedExtraData)) { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined compact map data: inlined extra data index %d exceeds number of inlined extra data %d", + extraDataIndex, + len(inlinedExtraData))) + } + + extraData, ok := inlinedExtraData[extraDataIndex].(*compactMapExtraData) + if !ok { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined compact map data: expect *compactMapExtraData, got %T", + inlinedExtraData[extraDataIndex])) + } + + // element 1: slab index + b, err := dec.DecodeBytes() + if err != nil { + return nil, NewDecodingError(err) + } + if len(b) != slabIndexSize { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined compact map data: expect %d bytes for slab index, got %d bytes", + slabIndexSize, + len(b))) + } + + var index SlabIndex + copy(index[:], b) + + slabID := NewSlabID(parentSlabID.address, index) + + // Decode values + elemCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, NewDecodingError(err) + } + + if elemCount != uint64(len(extraData.keys)) { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode compact map values: got %d, expect %d", + elemCount, + extraData.mapExtraData.Count)) + } + + // Make a copy of 
digests because extraData is shared by all inlined compact map data referring to the same type. + hkeys := make([]Digest, len(extraData.hkeys)) + copy(hkeys, extraData.hkeys) + + // Decode values + elementsSize := uint32(hkeyElementsPrefixSize) + elems := make([]element, elemCount) + for i := 0; i < int(elemCount); i++ { + value, err := decodeStorable(dec, slabID, inlinedExtraData) + if err != nil { + return nil, err + } + + // Make a copy of key in case it is shared. + key := extraData.keys[i].Copy() + + elemSize := singleElementPrefixSize + key.ByteSize() + value.ByteSize() + elem := &singleElement{key, value, elemSize} + + elems[i] = elem + elementsSize += digestSize + elem.Size() + } + + // Create hkeyElements + elements := &hkeyElements{ + hkeys: hkeys, + elems: elems, + level: 0, + size: elementsSize, + } + + header := MapSlabHeader{ + slabID: slabID, + size: inlinedMapDataSlabPrefixSize + elements.Size(), + firstKey: elements.firstKey(), + } + + return &MapDataSlab{ + header: header, + elements: elements, + extraData: &MapExtraData{ + // Make a copy of extraData.TypeInfo because + // inlined extra data are shared by all inlined slabs. + TypeInfo: extraData.mapExtraData.TypeInfo.Copy(), + Count: extraData.mapExtraData.Count, + Seed: extraData.mapExtraData.Seed, + }, + anySize: false, + collisionGroup: false, + inlined: true, + }, nil +} + +// DecodeInlinedMapStorable decodes inlined map data slab. 
Encoding is +// version 1 with CBOR tag having tag number CBORTagInlinedMap, and tag content +// as 3-element array: // -// Non-root DataSlab Header (18 bytes): +// +------------------+----------------+----------+ +// | extra data index | value ID index | elements | +// +------------------+----------------+----------+ // -// +-------------------------------+-------------------------+ -// | slab version + flag (2 bytes) | next slab ID (16 bytes) | -// +-------------------------------+-------------------------+ +// NOTE: This function doesn't decode tag number because tag number is decoded +// in the caller and decoder only contains tag content. +func DecodeInlinedMapStorable( + dec *cbor.StreamDecoder, + decodeStorable StorableDecoder, + parentSlabID SlabID, + inlinedExtraData []ExtraData, +) ( + Storable, + error, +) { + const inlinedMapDataSlabArrayCount = 3 + + arrayCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, NewDecodingError(err) + } + + if arrayCount != inlinedMapDataSlabArrayCount { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined map data slab, expect array of %d elements, got %d elements", + inlinedMapDataSlabArrayCount, + arrayCount)) + } + + // element 0: extra data index + extraDataIndex, err := dec.DecodeUint64() + if err != nil { + return nil, NewDecodingError(err) + } + if extraDataIndex >= uint64(len(inlinedExtraData)) { + return nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined compact map data: inlined extra data index %d exceeds number of inlined extra data %d", + extraDataIndex, + len(inlinedExtraData))) + } + extraData, ok := inlinedExtraData[extraDataIndex].(*MapExtraData) + if !ok { + return nil, NewDecodingError( + fmt.Errorf( + "extra data (%T) is wrong type, expect *MapExtraData", + inlinedExtraData[extraDataIndex])) + } + + // element 1: slab index + b, err := dec.DecodeBytes() + if err != nil { + return nil, NewDecodingError(err) + } + if len(b) != slabIndexSize { + return 
nil, NewDecodingError( + fmt.Errorf( + "failed to decode inlined compact map data: expect %d bytes for slab index, got %d bytes", + slabIndexSize, + len(b))) + } + + var index SlabIndex + copy(index[:], b) + + slabID := NewSlabID(parentSlabID.address, index) + + // Decode elements + elements, err := newElementsFromData(dec, decodeStorable, slabID, inlinedExtraData) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by newElementsFromData(). + return nil, err + } + + header := MapSlabHeader{ + slabID: slabID, + size: inlinedMapDataSlabPrefixSize + elements.Size(), + firstKey: elements.firstKey(), + } + + // NOTE: extra data doesn't need to be copied because every inlined map has its own inlined extra data. + + return &MapDataSlab{ + header: header, + elements: elements, + extraData: &MapExtraData{ + // Make a copy of extraData.TypeInfo because + // inlined extra data are shared by all inlined slabs. + TypeInfo: extraData.TypeInfo.Copy(), + Count: extraData.Count, + Seed: extraData.Seed, + }, + anySize: false, + collisionGroup: false, + inlined: true, + }, nil +} + +// Encode encodes this map data slab to the given encoder. +// +// Root DataSlab Header: +// +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | extra data (if root) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+----------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded elements // // See MapExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. // See hkeyElements.Encode() and singleElements.Encode() for elements section format. 
func (m *MapDataSlab) Encode(enc *Encoder) error { + if m.inlined { + return m.encodeAsInlined(enc) + } + + // Encoding is done in two steps: + // + // 1. Encode map elements using a new buffer while collecting inlined extra data from inlined elements. + // 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer. + + // Get a buffer from a pool to encode elements. + elementBuf := getBuffer() + defer putBuffer(elementBuf) + + elemEnc := NewEncoder(elementBuf, enc.encMode) + + err := m.encodeElements(elemEnc) + if err != nil { + return err + } + const version = 1 slabType := slabMapData @@ -2298,7 +2892,7 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return NewEncodingError(err) } - if m.hasPointer() { + if m.HasPointer() { h.setHasPointers() } @@ -2314,7 +2908,11 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { h.setRoot() } - // Write head (version + flag) + if elemEnc.hasInlinedExtraData() { + h.setHasInlinedSlabs() + } + + // Encode head _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) @@ -2322,14 +2920,23 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { // Encode extra data if m.extraData != nil { - err = m.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = m.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapExtraData.Encode(). 
return err } } - // Encode next slab ID + // Encode inlined types + if elemEnc.hasInlinedExtraData() { + err = elemEnc.inlinedExtraData().Encode(enc) + if err != nil { + return NewEncodingError(err) + } + } + + // Encode next slab ID for non-root slab if m.next != SlabIDUndefined { n, err := m.next.ToRawBytes(enc.Scratch[:]) if err != nil { @@ -2345,6 +2952,99 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } // Encode elements + err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes()) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func (m *MapDataSlab) encodeElements(enc *Encoder) error { + err := m.elements.Encode(enc) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by elements.Encode(). + return err + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +// encodeAsInlined encodes inlined map data slab. 
Encoding is +// version 1 with CBOR tag having tag number CBORTagInlinedMap, +// and tag content as 3-element array: +// +// +------------------+----------------+----------+ +// | extra data index | value ID index | elements | +// +------------------+----------------+----------+ +func (m *MapDataSlab) encodeAsInlined(enc *Encoder) error { + if m.extraData == nil { + return NewEncodingError( + fmt.Errorf("failed to encode non-root map data slab as inlined")) + } + + if !m.inlined { + return NewEncodingError( + fmt.Errorf("failed to encode standalone map data slab as inlined")) + } + + if hkeys, keys, values, ok := m.canBeEncodedAsCompactMap(); ok { + return encodeAsInlinedCompactMap(enc, m.header.slabID, m.extraData, hkeys, keys, values) + } + + return m.encodeAsInlinedMap(enc) +} + +func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder) error { + + extraDataIndex, err := enc.inlinedExtraData().addMapExtraData(m.extraData) + if err != nil { + // err is already categorized by InlinedExtraData.addMapExtraData(). + return err + } + + if extraDataIndex > maxInlinedExtraDataIndex { + return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) + } + + // Encode tag number and array head of 3 elements + err = enc.CBOR.EncodeRawBytes([]byte{ + // tag number + 0xd8, CBORTagInlinedMap, + // array head of 3 elements + 0x83, + }) + if err != nil { + return NewEncodingError(err) + } + + // element 0: extra data index + // NOTE: encoded extra data index is fixed sized CBOR uint + err = enc.CBOR.EncodeRawBytes([]byte{ + 0x18, + byte(extraDataIndex), + }) + if err != nil { + return NewEncodingError(err) + } + + // element 1: slab index + err = enc.CBOR.EncodeBytes(m.header.slabID.index[:]) + if err != nil { + return NewEncodingError(err) + } + + // element 2: map elements err = m.elements.Encode(enc) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). 
@@ -2359,7 +3059,169 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return nil } -func (m *MapDataSlab) hasPointer() bool { +// encodeAsInlinedCompactMap encodes hkeys, keys, and values as inlined compact map value. +func encodeAsInlinedCompactMap( + enc *Encoder, + slabID SlabID, + extraData *MapExtraData, + hkeys []Digest, + keys []ComparableStorable, + values []Storable, +) error { + + extraDataIndex, cachedKeys, err := enc.inlinedExtraData().addCompactMapExtraData(extraData, hkeys, keys) + if err != nil { + // err is already categorized by InlinedExtraData.addCompactMapExtraData(). + return err + } + + if len(keys) != len(cachedKeys) { + return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in cached compact map type %d", len(keys), len(cachedKeys))) + } + + if extraDataIndex > maxInlinedExtraDataIndex { + // This should never happen because of slab size. + return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit %d", extraDataIndex, maxInlinedExtraDataIndex)) + } + + // Encode tag number and array head of 3 elements + err = enc.CBOR.EncodeRawBytes([]byte{ + // tag number + 0xd8, CBORTagInlinedCompactMap, + // array head of 3 elements + 0x83, + }) + if err != nil { + return NewEncodingError(err) + } + + // element 0: extra data index + // NOTE: encoded extra data index is fixed sized CBOR uint + err = enc.CBOR.EncodeRawBytes([]byte{ + 0x18, + byte(extraDataIndex), + }) + if err != nil { + return NewEncodingError(err) + } + + // element 1: slab id + err = enc.CBOR.EncodeBytes(slabID.index[:]) + if err != nil { + return NewEncodingError(err) + } + + // element 2: compact map values in the order of cachedKeys + err = encodeCompactMapValues(enc, cachedKeys, keys, values) + if err != nil { + // err is already categorized by encodeCompactMapValues(). 
+ return err + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +// encodeCompactMapValues encodes compact values as an array of values ordered by cachedKeys. +func encodeCompactMapValues( + enc *Encoder, + cachedKeys []ComparableStorable, + keys []ComparableStorable, + values []Storable, +) error { + + var err error + + err = enc.CBOR.EncodeArrayHead(uint64(len(cachedKeys))) + if err != nil { + return NewEncodingError(err) + } + + keyIndexes := make([]int, len(keys)) + for i := 0; i < len(keys); i++ { + keyIndexes[i] = i + } + + // Encode values in the same order as cachedKeys. + for i, cachedKey := range cachedKeys { + found := false + for j := i; j < len(keyIndexes); j++ { + index := keyIndexes[j] + key := keys[index] + + if cachedKey.Equal(key) { + found = true + keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i] + + err = values[index].Encode(enc) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. 
+ return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value storable") + } + + break + } + } + if !found { + return NewEncodingError(fmt.Errorf("failed to find key %v", cachedKey)) + } + } + + return nil +} + +// canBeEncodedAsCompactMap returns true if: +// - map data slab is inlined +// - map type is composite type +// - no collision elements +// - keys are stored inline (not in a separate slab) +func (m *MapDataSlab) canBeEncodedAsCompactMap() ([]Digest, []ComparableStorable, []Storable, bool) { + if !m.inlined { + return nil, nil, nil, false + } + + if !m.extraData.TypeInfo.IsComposite() { + return nil, nil, nil, false + } + + elements, ok := m.elements.(*hkeyElements) + if !ok { + return nil, nil, nil, false + } + + keys := make([]ComparableStorable, m.extraData.Count) + values := make([]Storable, m.extraData.Count) + + for i, e := range elements.elems { + se, ok := e.(*singleElement) + if !ok { + // Has collision element + return nil, nil, nil, false + } + + if _, ok = se.key.(SlabIDStorable); ok { + // Key is stored in a separate slab + return nil, nil, nil, false + } + + key, ok := se.key.(ComparableStorable) + if !ok { + // Key can't be compared (sorted) + return nil, nil, nil, false + } + + keys[i] = key + values[i] = se.value + } + + return elements.hkeys, keys, values, true +} + +func (m *MapDataSlab) HasPointer() bool { return m.elements.hasPointer() } @@ -2368,12 +3230,74 @@ func (m *MapDataSlab) ChildStorables() []Storable { } func (m *MapDataSlab) getPrefixSize() uint32 { + if m.inlined { + return inlinedMapDataSlabPrefixSize + } if m.extraData != nil { return mapRootDataSlabPrefixSize } return mapDataSlabPrefixSize } +func (m *MapDataSlab) Inlined() bool { + return m.inlined +} + +// Inlinable returns true if +// - map data slab is root slab +// - size of inlined map data slab <= maxInlineSize +func (m *MapDataSlab) Inlinable(maxInlineSize uint64) bool { + if m.extraData == nil { + // Non-root data slab is not inlinable. 
+ return false + } + + inlinedSize := inlinedMapDataSlabPrefixSize + m.elements.Size() + + // Inlined byte size must be less than max inline size. + return uint64(inlinedSize) <= maxInlineSize +} + +// inline converts not-inlined MapDataSlab to inlined MapDataSlab and removes it from storage. +func (m *MapDataSlab) Inline(storage SlabStorage) error { + if m.inlined { + return NewFatalError(fmt.Errorf("failed to inline MapDataSlab %s: it is inlined already", m.header.slabID)) + } + + id := m.header.slabID + + // Remove slab from storage because it is going to be inlined. + err := storage.Remove(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id)) + } + + // Update data slab size from not inlined to inlined + m.header.size = inlinedMapDataSlabPrefixSize + m.elements.Size() + + // Update data slab inlined status. + m.inlined = true + + return nil +} + +// uninline converts an inlined MapDataSlab to uninlined MapDataSlab and stores it in storage. +func (m *MapDataSlab) Uninline(storage SlabStorage) error { + if !m.inlined { + return NewFatalError(fmt.Errorf("failed to uninline MapDataSlab %s: it is not inlined", m.header.slabID)) + } + + // Update data slab size from inlined to not inlined. + m.header.size = mapRootDataSlabPrefixSize + m.elements.Size() + + // Update data slab inlined status. 
+ m.inlined = false + + // Store slab in storage + return storeSlab(storage, m) +} + func elementsStorables(elems elements, childStorables []Storable) []Storable { switch v := elems.(type) { @@ -2426,12 +3350,22 @@ func (m *MapDataSlab) StoredValue(storage SlabStorage) (Value, error) { }, nil } -func (m *MapDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) { +func (m *MapDataSlab) Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { - existingValue, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := m.elements.Set(storage, m.SlabID().address, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Set(). - return nil, err + return nil, nil, err } // Adjust header's first key @@ -2441,13 +3375,14 @@ func (m *MapDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Diges m.header.size = m.getPrefixSize() + m.elements.Size() // Store modified slab - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + if !m.inlined { + err := storeSlab(storage, m) + if err != nil { + return nil, nil, err + } } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -2465,10 +3400,11 @@ func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, m.header.size = m.getPrefixSize() + m.elements.Size() // Store modified slab - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + if !m.inlined { + err := storeSlab(storage, m) + if err != nil { + return nil, nil, err + } } return k, v, nil @@ -2982,7 +3918,8 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error { // Encode extra data if present if m.extraData != nil { - err = m.extraData.Encode(enc) + // Use defaultEncodeTypeInfo to encode root level TypeInfo as is. + err = m.extraData.Encode(enc, defaultEncodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapExtraData.Encode(). 
 			return err
@@ -3027,6 +3964,22 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (m *MapMetaDataSlab) Inlined() bool {
+	return false
+}
+
+func (m *MapMetaDataSlab) Inlinable(_ uint64) bool {
+	return false
+}
+
+func (m *MapMetaDataSlab) Inline(_ SlabStorage) error {
+	return NewFatalError(fmt.Errorf("failed to inline MapMetaDataSlab %s: MapMetaDataSlab can't be inlined", m.header.slabID))
+}
+
+func (m *MapMetaDataSlab) Uninline(_ SlabStorage) error {
+	return NewFatalError(fmt.Errorf("failed to uninline MapMetaDataSlab %s: MapMetaDataSlab is already uninlined", m.header.slabID))
+}
+
 func (m *MapMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) {
 	if m.extraData == nil {
 		return nil, NewNotValueError(m.SlabID())
@@ -3050,42 +4003,114 @@ func (m *MapMetaDataSlab) ChildStorables() []Storable {
 		childIDs[i] = SlabIDStorable(h.slabID)
 	}
 
-	return childIDs
-}
+	return childIDs
+}
+
+func (m *MapMetaDataSlab) getChildSlabByDigest(storage SlabStorage, hkey Digest, key Value) (MapSlab, int, error) {
+
+	ans := -1
+	i, j := 0, len(m.childrenHeaders)
+	for i < j {
+		h := int(uint(i+j) >> 1) // avoid overflow when computing h
+		if m.childrenHeaders[h].firstKey > hkey {
+			j = h
+		} else {
+			ans = h
+			i = h + 1
+		}
+	}
+
+	if ans == -1 {
+		return nil, 0, NewKeyNotFoundError(key)
+	}
+
+	childHeaderIndex := ans
+
+	childID := m.childrenHeaders[childHeaderIndex].slabID
+
+	child, err := getMapSlab(storage, childID)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return child, childHeaderIndex, nil
+}
+
+func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) {
+	child, _, err := m.getChildSlabByDigest(storage, hkey, key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Don't need to wrap error as external error because err is already categorized by MapSlab.Get().
+ return child.Get(storage, digester, level, hkey, comparator, key) +} + +func (m *MapMetaDataSlab) getElementAndNextKey( + storage SlabStorage, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + key Value, +) (MapKey, MapValue, MapKey, error) { + child, index, err := m.getChildSlabByDigest(storage, hkey, key) + if err != nil { + return nil, nil, nil, err + } + + k, v, nextKey, err := child.getElementAndNextKey(storage, digester, level, hkey, comparator, key) + if err != nil { + return nil, nil, nil, err + } + + if nextKey != nil { + // Next element is still in the same child slab. + return k, v, nextKey, nil + } + + // Next element is in the next child slab. -func (m *MapMetaDataSlab) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) { + nextIndex := index + 1 - ans := -1 - i, j := 0, len(m.childrenHeaders) - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - if m.childrenHeaders[h].firstKey > hkey { - j = h - } else { - ans = h - i = h + 1 + switch { + case nextIndex < len(m.childrenHeaders): + // Next element is in the next child of this MapMetaDataSlab. + nextChildID := m.childrenHeaders[nextIndex].slabID + + nextChild, err := getMapSlab(storage, nextChildID) + if err != nil { + return nil, nil, nil, err } - } - if ans == -1 { - return nil, NewKeyNotFoundError(key) - } + nextKey, err = firstKeyInMapSlab(storage, nextChild) + if err != nil { + return nil, nil, nil, err + } - childHeaderIndex := ans + return k, v, nextKey, nil - childID := m.childrenHeaders[childHeaderIndex].slabID + case nextIndex == len(m.childrenHeaders): + // Next element is outside this MapMetaDataSlab, so nextKey is nil. + return k, v, nil, nil - child, err := getMapSlab(storage, childID) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). 
- return nil, err + default: // nextIndex > len(m.childrenHeaders) + // This should never happen. + return nil, nil, nil, NewUnreachableError() } - - // Don't need to wrap error as external error because err is already categorized by MapSlab.Get(). - return child.Get(storage, digester, level, hkey, comparator, key) } -func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester Digester, level uint, hkey Digest, comparator ValueComparator, hip HashInputProvider, key Value, value Value) (MapValue, error) { +func (m *MapMetaDataSlab) Set( + storage SlabStorage, + b DigesterBuilder, + digester Digester, + level uint, + hkey Digest, + comparator ValueComparator, + hip HashInputProvider, + key Value, + value Value, +) (MapKey, MapValue, error) { ans := 0 i, j := 0, len(m.childrenHeaders) @@ -3106,13 +4131,13 @@ func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester D child, err := getMapSlab(storage, childID) if err != nil { // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, err + return nil, nil, err } - existingValue, err := child.Set(storage, b, digester, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := child.Set(storage, b, digester, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). - return nil, err + return nil, nil, err } m.childrenHeaders[childHeaderIndex] = child.Header() @@ -3126,26 +4151,25 @@ func (m *MapMetaDataSlab) Set(storage SlabStorage, b DigesterBuilder, digester D err := m.SplitChildSlab(storage, child, childHeaderIndex) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.SplitChildSlab(). 
- return nil, err + return nil, nil, err } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } if underflowSize, underflow := child.IsUnderflow(); underflow { err := m.MergeOrRebalanceChildSlab(storage, child, childHeaderIndex, underflowSize) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapMetaDataSlab.MergeOrRebalanceChildSlab(). - return nil, err + return nil, nil, err } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } - err = storage.Store(m.header.slabID, m) + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, err } - return existingValue, nil + return keyStorable, existingMapValueStorable, nil } func (m *MapMetaDataSlab) Remove(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapKey, MapValue, error) { @@ -3207,10 +4231,9 @@ func (m *MapMetaDataSlab) Remove(storage SlabStorage, digester Digester, level u return k, v, nil } - err = storage.Store(m.header.slabID, m) + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return nil, nil, err } return k, v, nil } @@ -3237,25 +4260,17 @@ func (m *MapMetaDataSlab) SplitChildSlab(storage SlabStorage, child MapSlab, chi m.header.size += mapSlabHeaderSize // Store modified slabs - err = storage.Store(left.SlabID(), left) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - - err = storage.Store(right.SlabID(), right) + err = storeSlab(storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + err = storeSlab(storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } - return nil + return storeSlab(storage, m) } // MergeOrRebalanceChildSlab merges or rebalances child slab. @@ -3328,24 +4343,17 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(rightSib.SlabID(), rightSib) + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) } // Rebalance with left sib @@ -3361,24 +4369,17 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.childrenHeaders[childHeaderIndex] = child.Header() // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) } // Rebalance with bigger sib @@ -3394,24 +4395,18 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.childrenHeaders[childHeaderIndex] = child.Header() // Store modified slabs - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) + } else { // leftSib.ByteSize() <= rightSib.ByteSize @@ -3430,24 +4425,17 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(rightSib.SlabID(), rightSib) + err = storeSlab(storage, rightSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rightSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) - } - return nil + return storeSlab(storage, m) } } @@ -3476,15 +4464,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove right sib from storage @@ -3514,15 +4501,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.header.size -= mapSlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove child from storage @@ -3551,15 +4537,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( m.header.size -= mapSlabHeaderSize // Store modified slabs in storage - err = storage.Store(leftSib.SlabID(), leftSib) + err = storeSlab(storage, leftSib) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", leftSib.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove child from storage @@ -3592,15 +4577,14 @@ func (m *MapMetaDataSlab) MergeOrRebalanceChildSlab( } // Store modified slabs in storage - err = storage.Store(child.SlabID(), child) + err = storeSlab(storage, child) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", child.SlabID())) + return err } - err = storage.Store(m.header.slabID, m) + + err = storeSlab(storage, m) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + return err } // Remove rightSib from storage @@ -3850,10 +4834,9 @@ func NewMap(storage SlabStorage, address Address, digestBuilder DigesterBuilder, extraData: extraData, } - err = storage.Store(root.header.slabID, root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.header.slabID)) + return nil, err } return &OrderedMap{ @@ -3888,8 +4871,142 @@ func NewMapWithRootID(storage SlabStorage, rootID SlabID, digestBuilder Digester }, nil } +func (m *OrderedMap) Inlined() bool { + return m.root.Inlined() +} + +func (m *OrderedMap) Inlinable(maxInlineSize uint64) bool { + return m.root.Inlinable(maxInlineSize) +} + +func (m *OrderedMap) setParentUpdater(f parentUpdater) { + m.parentUpdater = f +} + +// setCallbackWithChild sets up callback function with child value (child) +// so parent map (m) can be notified when child value is modified. 
+func (m *OrderedMap) setCallbackWithChild( + comparator ValueComparator, + hip HashInputProvider, + key Value, + child Value, + maxInlineSize uint64, +) { + c, ok := child.(mutableValueNotifier) + if !ok { + return + } + + vid := c.ValueID() + + c.setParentUpdater(func() (found bool, err error) { + + // Avoid unnecessary write operation on parent container. + // Child value was stored as SlabIDStorable (not inlined) in parent container, + // and continues to be stored as SlabIDStorable (still not inlinable), + // so no update to parent container is needed. + if !c.Inlined() && !c.Inlinable(maxInlineSize) { + return true, nil + } + + // Retrieve element value under the same key and + // verify retrieved value is this child (c). + _, valueStorable, err := m.get(comparator, hip, key) + if err != nil { + var knf *KeyNotFoundError + if errors.As(err, &knf) { + return false, nil + } + // Don't need to wrap error as external error because err is already categorized by OrderedMap.Get(). + return false, err + } + + // Verify retrieved element value is either SlabIDStorable or Slab, with identical value ID. + switch valueStorable := valueStorable.(type) { + case SlabIDStorable: + sid := SlabID(valueStorable) + if !vid.equal(sid) { + return false, nil + } + + case Slab: + sid := valueStorable.SlabID() + if !vid.equal(sid) { + return false, nil + } + + default: + return false, nil + } + + // Set child value with parent map using same key. + // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := m.set(comparator, hip, key, c) + if err != nil { + return false, err + } + + // Verify overwritten storable has identical value ID. 
+ + switch existingValueStorable := existingValueStorable.(type) { + case SlabIDStorable: + sid := SlabID(existingValueStorable) + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten SlabIDStorable %s != value ID %s", + sid, + vid)) + } + + case Slab: + sid := existingValueStorable.SlabID() + if !vid.equal(sid) { + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten Slab ID %s != value ID %s", + sid, + vid)) + } + + case nil: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is nil")) + + default: + return false, NewFatalError( + fmt.Errorf( + "failed to reset child value in parent updater callback: overwritten value is wrong type %T", + existingValueStorable)) + } + + return true, nil + }) +} + +// notifyParentIfNeeded calls parent updater if this map (m) is a child +// element in another container. +func (m *OrderedMap) notifyParentIfNeeded() error { + if m.parentUpdater == nil { + return nil + } + + // If parentUpdater() doesn't find child map (m), then no-op on parent container + // and unset parentUpdater callback in child map. This can happen when child + // map is an outdated reference (removed or overwritten in parent container). 
+ found, err := m.parentUpdater() + if err != nil { + return err + } + if !found { + m.parentUpdater = nil + } + return nil +} + func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key Value) (bool, error) { - _, err := m.get(comparator, hip, key) + _, _, err := m.get(comparator, hip, key) if err != nil { var knf *KeyNotFoundError if errors.As(err, &knf) { @@ -3903,26 +5020,32 @@ func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key func (m *OrderedMap) Get(comparator ValueComparator, hip HashInputProvider, key Value) (Value, error) { - storable, err := m.get(comparator, hip, key) + keyStorable, valueStorable, err := m.get(comparator, hip, key) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Get(). return nil, err } - v, err := storable.StoredValue(m.Storage) + v, err := valueStorable.StoredValue(m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + + // As a parent, this map (m) sets up notification callback with child + // value (v) so this map can be notified when child value is modified. + maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) + m.setCallbackWithChild(comparator, hip, key, v, maxInlineSize) + return v, nil } -func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, error) { +func (m *OrderedMap) get(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) { keyDigest, err := m.digesterBuilder.Digest(hip, key) if err != nil { // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. 
-		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester")
+		return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester")
 	}
 	defer putDigester(keyDigest)
@@ -3931,14 +5054,128 @@
 	hkey, err := keyDigest.Digest(level)
 	if err != nil {
 		// Wrap err as external error (if needed) because err is returned by Digesert interface.
-		return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level))
+		return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level))
 	}
 
 	// Don't need to wrap error as external error because err is already categorized by MapSlab.Get().
 	return m.root.Get(m.Storage, keyDigest, level, hkey, comparator, key)
 }
 
-func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) {
+func (m *OrderedMap) getElementAndNextKey(comparator ValueComparator, hip HashInputProvider, key Value) (Value, Value, Value, error) {
+
+	keyDigest, err := m.digesterBuilder.Digest(hip, key)
+	if err != nil {
+		// Wrap err as external error (if needed) because err is returned by DigesterBuilder interface.
+		return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester")
+	}
+	defer putDigester(keyDigest)
+
+	level := uint(0)
+
+	hkey, err := keyDigest.Digest(level)
+	if err != nil {
+		// Wrap err as external error (if needed) because err is returned by Digester interface.
+ return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) + } + + keyStorable, valueStorable, nextKeyStorable, err := m.root.getElementAndNextKey(m.Storage, keyDigest, level, hkey, comparator, key) + if err != nil { + return nil, nil, nil, err + } + + k, err := keyStorable.StoredValue(m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + } + + v, err := valueStorable.StoredValue(m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + } + + var nextKey Value + if nextKeyStorable != nil { + nextKey, err = nextKeyStorable.StoredValue(m.Storage) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. + return nil, nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") + } + } + + // As a parent, this map (m) sets up notification callback with child + // value (v) so this map can be notified when child value is modified. + maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) + m.setCallbackWithChild(comparator, hip, key, v, maxInlineSize) + + return k, v, nextKey, nil +} + +func (m *OrderedMap) getNextKey(comparator ValueComparator, hip HashInputProvider, key Value) (Value, error) { + + keyDigest, err := m.digesterBuilder.Digest(hip, key) + if err != nil { + // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. 
+		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create map key digester")
+	}
+	defer putDigester(keyDigest)
+
+	level := uint(0)
+
+	hkey, err := keyDigest.Digest(level)
+	if err != nil {
+		// Wrap err as external error (if needed) because err is returned by Digester interface.
+		return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level))
+	}
+
+	_, _, nextKeyStorable, err := m.root.getElementAndNextKey(m.Storage, keyDigest, level, hkey, comparator, key)
+	if err != nil {
+		return nil, err
+	}
+
+	if nextKeyStorable == nil {
+		return nil, nil
+	}
+
+	nextKey, err := nextKeyStorable.StoredValue(m.Storage)
+	if err != nil {
+		// Wrap err as external error (if needed) because err is returned by Storable interface.
+		return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value")
+	}
+
+	return nextKey, nil
+}
+
+func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) {
+	storable, err := m.set(comparator, hip, key, value)
+	if err != nil {
+		return nil, err
+	}
+
+	// If overwritten storable is an inlined slab, uninline the slab and store it in storage.
+	// This is to prevent potential data loss because the overwritten inlined slab was not in
+	// storage and any future changes to it would have been lost.
+ switch s := storable.(type) { + case ArraySlab: // inlined array slab + err = s.Uninline(m.Storage) + if err != nil { + return nil, err + } + storable = SlabIDStorable(s.SlabID()) + + case MapSlab: // inlined map slab + err = s.Uninline(m.Storage) + if err != nil { + return nil, err + } + storable = SlabIDStorable(s.SlabID()) + } + + return storable, nil +} + +func (m *OrderedMap) set(comparator ValueComparator, hip HashInputProvider, key Value, value Value) (Storable, error) { keyDigest, err := m.digesterBuilder.Digest(hip, key) if err != nil { @@ -3955,13 +5192,13 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to get map key digest at level %d", level)) } - existingValue, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value) + keyStorable, existingMapValueStorable, err := m.root.Set(m.Storage, m.digesterBuilder, keyDigest, level, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by MapSlab.Set(). return nil, err } - if existingValue == nil { + if existingMapValueStorable == nil { m.root.ExtraData().incrementCount() } @@ -3974,7 +5211,6 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key // Don't need to wrap error as external error because err is already categorized by OrderedMap.promoteChildAsNewRoot(). return nil, err } - return existingValue, nil } } @@ -3986,10 +5222,63 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key } } - return existingValue, nil + // This map (m) is a parent to the new child (value), and this map + // can also be a child in another container. + // + // As a parent, this map needs to setup notification callback with + // the new child value, so it can be notified when child is modified. 
+ // + // If this map is a child, it needs to notify its parent because its + // content (maybe also its size) is changed by this "Set" operation. + + // If this map is a child, it notifies parent by invoking callback because + // this map is changed by setting new child. + err = m.notifyParentIfNeeded() + if err != nil { + return nil, err + } + + // As a parent, this map sets up notification callback with child value + // so this map can be notified when child value is modified. + // + // Setting up notification with new child value can happen at any time + // (either before or after this map notifies its parent) because + // setting up notification doesn't trigger any read/write ops on parent or child. + maxInlineSize := maxInlineMapValueSize(uint64(keyStorable.ByteSize())) + m.setCallbackWithChild(comparator, hip, key, value, maxInlineSize) + + return existingMapValueStorable, nil } func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) { + keyStorable, valueStorable, err := m.remove(comparator, hip, key) + if err != nil { + return nil, nil, err + } + + // If overwritten storable is an inlined slab, uninline the slab and store it in storage. + // This is to prevent potential data loss because the overwritten inlined slab was not in + // storage and any future changes to it would have been lost. 
+ switch s := valueStorable.(type) { + case ArraySlab: + err = s.Uninline(m.Storage) + if err != nil { + return nil, nil, err + } + valueStorable = SlabIDStorable(s.SlabID()) + + case MapSlab: + err = s.Uninline(m.Storage) + if err != nil { + return nil, nil, err + } + valueStorable = SlabIDStorable(s.SlabID()) + } + + return keyStorable, valueStorable, nil +} + +func (m *OrderedMap) remove(comparator ValueComparator, hip HashInputProvider, key Value) (Storable, Storable, error) { keyDigest, err := m.digesterBuilder.Digest(hip, key) if err != nil { @@ -4023,7 +5312,6 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k // Don't need to wrap error as external error because err is already categorized by OrderedMap.promoteChildAsNewRoot(). return nil, nil, err } - return k, v, nil } } @@ -4035,6 +5323,13 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k } } + // If this map is a child, it notifies parent by invoking callback because + // this map is changed by removing element. + err = m.notifyParentIfNeeded() + if err != nil { + return nil, nil, err + } + return k, v, nil } @@ -4085,22 +5380,17 @@ func (m *OrderedMap) splitRoot() error { m.root = newRoot - err = m.Storage.Store(left.SlabID(), left) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", left.SlabID())) - } - err = m.Storage.Store(right.SlabID(), right) + err = storeSlab(m.Storage, left) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", right.SlabID())) + return err } - err = m.Storage.Store(m.root.SlabID(), m.root) + + err = storeSlab(m.Storage, right) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + return err } - return nil + + return storeSlab(m.Storage, m.root) } func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error { @@ -4127,10 +5417,9 @@ func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error { m.root.SetExtraData(extraData) - err = m.Storage.Store(rootID, m.root) + err = storeSlab(m.Storage, m.root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", rootID)) + return err } err = m.Storage.Remove(childID) @@ -4142,25 +5431,61 @@ func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error { } func (m *OrderedMap) SlabID() SlabID { + if m.root.Inlined() { + return SlabIDUndefined + } return m.root.SlabID() } func (m *OrderedMap) ValueID() ValueID { - sid := m.SlabID() + return slabIDToValueID(m.root.SlabID()) +} - var id ValueID - copy(id[:], sid.address[:]) - copy(id[8:], sid.index[:]) +// Storable returns OrderedMap m as either: +// - SlabIDStorable, or +// - inlined data slab storable +func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) { - return id -} + inlined := m.root.Inlined() + inlinable := m.root.Inlinable(maxInlineSize) -func (m *OrderedMap) StoredValue(_ SlabStorage) (Value, error) { - return m, nil -} + switch { + + case inlinable && inlined: + // Root slab is inlinable and was inlined. + // Return root slab as storable, no size adjustment and change to storage. 
+ return m.root, nil + + case !inlinable && !inlined: + // Root slab is not inlinable and was not inlined. + // Return root slab as storable, no size adjustment and change to storage. + return SlabIDStorable(m.SlabID()), nil + + case inlinable && !inlined: + // Root slab is inlinable and was NOT inlined. -func (m *OrderedMap) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) { - return SlabIDStorable(m.SlabID()), nil + // Inline root data slab. + err := m.root.Inline(m.Storage) + if err != nil { + return nil, err + } + + return m.root, nil + + case !inlinable && inlined: + // Root slab is NOT inlinable and was inlined. + + // Uninline root slab. + err := m.root.Uninline(m.Storage) + if err != nil { + return nil, err + } + + return SlabIDStorable(m.SlabID()), nil + + default: + panic("not reachable") + } } func (m *OrderedMap) Count() uint64 { @@ -4178,8 +5503,27 @@ func (m *OrderedMap) Type() TypeInfo { return nil } +func (m *OrderedMap) SetType(typeInfo TypeInfo) error { + extraData := m.root.ExtraData() + extraData.TypeInfo = typeInfo + + m.root.SetExtraData(extraData) + + if m.Inlined() { + // Map is inlined. + + // Notify parent container so parent slab is saved in storage with updated TypeInfo of inlined array. + return m.notifyParentIfNeeded() + } + + // Map is standalone. + + // Store modified root slab in storage since typeInfo is part of extraData stored in root slab. 
+ return storeSlab(m.Storage, m.root) +} + func (m *OrderedMap) String() string { - iterator, err := m.Iterator() + iterator, err := m.ReadOnlyIterator() if err != nil { return err.Error() } @@ -4215,19 +5559,24 @@ func getMapSlab(storage SlabStorage, id SlabID) (MapSlab, error) { return mapSlab, nil } -func firstMapDataSlab(storage SlabStorage, slab MapSlab) (MapSlab, error) { - if slab.IsData() { +func firstMapDataSlab(storage SlabStorage, slab MapSlab) (*MapDataSlab, error) { + switch slab := slab.(type) { + case *MapDataSlab: return slab, nil + + case *MapMetaDataSlab: + firstChildID := slab.childrenHeaders[0].slabID + firstChild, err := getMapSlab(storage, firstChildID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return nil, err + } + // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). + return firstMapDataSlab(storage, firstChild) + + default: + return nil, NewUnreachableError() } - meta := slab.(*MapMetaDataSlab) - firstChildID := meta.childrenHeaders[0].slabID - firstChild, err := getMapSlab(storage, firstChildID) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return nil, err - } - // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). 
- return firstMapDataSlab(storage, firstChild) } func (m *MapExtraData) incrementCount() { @@ -4238,19 +5587,19 @@ func (m *MapExtraData) decrementCount() { m.Count-- } -type MapElementIterator struct { +type mapElementIterator struct { storage SlabStorage elements elements index int - nestedIterator *MapElementIterator + nestedIterator *mapElementIterator } -func (i *MapElementIterator) Next() (key MapKey, value MapValue, err error) { +func (i *mapElementIterator) next() (key MapKey, value MapValue, err error) { if i.nestedIterator != nil { - key, value, err = i.nestedIterator.Next() + key, value, err = i.nestedIterator.next() if err != nil { - // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). + // Don't need to wrap error as external error because err is already categorized by mapElementIterator.next(). return nil, nil, err } if key != nil { @@ -4281,14 +5630,14 @@ func (i *MapElementIterator) Next() (key MapKey, value MapValue, err error) { return nil, nil, err } - i.nestedIterator = &MapElementIterator{ + i.nestedIterator = &mapElementIterator{ storage: i.storage, elements: elems, } i.index++ // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). 
- return i.nestedIterator.Next() + return i.nestedIterator.next() default: return nil, nil, NewSlabDataError(fmt.Errorf("unexpected element type %T during map iteration", e)) @@ -4298,15 +5647,139 @@ func (i *MapElementIterator) Next() (key MapKey, value MapValue, err error) { type MapEntryIterationFunc func(Value, Value) (resume bool, err error) type MapElementIterationFunc func(Value) (resume bool, err error) -type MapIterator struct { - storage SlabStorage - id SlabID - elemIterator *MapElementIterator +type MapIterator interface { + CanMutate() bool + Next() (Value, Value, error) + NextKey() (Value, error) + NextValue() (Value, error) +} + +type emptyMapIterator struct { + readOnly bool +} + +var _ MapIterator = &emptyMapIterator{} + +var emptyMutableMapIterator = &emptyMapIterator{readOnly: false} +var emptyReadOnlyMapIterator = &emptyMapIterator{readOnly: true} + +func (i *emptyMapIterator) CanMutate() bool { + return !i.readOnly +} + +func (*emptyMapIterator) Next() (Value, Value, error) { + return nil, nil, nil +} + +func (*emptyMapIterator) NextKey() (Value, error) { + return nil, nil +} + +func (*emptyMapIterator) NextValue() (Value, error) { + return nil, nil +} + +type mutableMapIterator struct { + m *OrderedMap + comparator ValueComparator + hip HashInputProvider + nextKey Value +} + +var _ MapIterator = &mutableMapIterator{} + +func (i *mutableMapIterator) CanMutate() bool { + return true +} + +func (i *mutableMapIterator) Next() (Value, Value, error) { + if i.nextKey == nil { + // No more elements. + return nil, nil, nil + } + + // Don't need to set up notification callback for v because + // getElementAndNextKey() returns value with notification already. + k, v, nk, err := i.m.getElementAndNextKey(i.comparator, i.hip, i.nextKey) + if err != nil { + return nil, nil, err + } + + i.nextKey = nk + + return k, v, nil +} + +func (i *mutableMapIterator) NextKey() (Value, error) { + if i.nextKey == nil { + // No more elements. 
+ return nil, nil + } + + key := i.nextKey + + nk, err := i.m.getNextKey(i.comparator, i.hip, key) + if err != nil { + return nil, err + } + + i.nextKey = nk + + return key, nil +} + +func (i *mutableMapIterator) NextValue() (Value, error) { + if i.nextKey == nil { + // No more elements. + return nil, nil + } + + // Don't need to set up notification callback for v because + // getElementAndNextKey() returns value with notification already. + _, v, nk, err := i.m.getElementAndNextKey(i.comparator, i.hip, i.nextKey) + if err != nil { + return nil, err + } + + i.nextKey = nk + + return v, nil +} + +type ReadOnlyMapIteratorMutationCallback func(mutatedValue Value) + +type readOnlyMapIterator struct { + m *OrderedMap + nextDataSlabID SlabID + elemIterator *mapElementIterator + keyMutationCallback ReadOnlyMapIteratorMutationCallback + valueMutationCallback ReadOnlyMapIteratorMutationCallback +} + +// defaultReadOnlyMapIteratorMutatinCallback is no-op. +var defaultReadOnlyMapIteratorMutatinCallback ReadOnlyMapIteratorMutationCallback = func(Value) {} + +var _ MapIterator = &readOnlyMapIterator{} + +func (i *readOnlyMapIterator) setMutationCallback(key, value Value) { + if k, ok := key.(mutableValueNotifier); ok { + k.setParentUpdater(func() (found bool, err error) { + i.keyMutationCallback(key) + return true, NewReadOnlyIteratorElementMutationError(i.m.ValueID(), k.ValueID()) + }) + } + + if v, ok := value.(mutableValueNotifier); ok { + v.setParentUpdater(func() (found bool, err error) { + i.valueMutationCallback(value) + return true, NewReadOnlyIteratorElementMutationError(i.m.ValueID(), v.ValueID()) + }) + } } -func (i *MapIterator) Next() (key Value, value Value, err error) { +func (i *readOnlyMapIterator) Next() (key Value, value Value, err error) { if i.elemIterator == nil { - if i.id == SlabIDUndefined { + if i.nextDataSlabID == SlabIDUndefined { return nil, nil, nil } @@ -4318,24 +5791,26 @@ func (i *MapIterator) Next() (key Value, value Value, err error) { } var 
ks, vs Storable - ks, vs, err = i.elemIterator.Next() + ks, vs, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). return nil, nil, err } if ks != nil { - key, err = ks.StoredValue(i.storage) + key, err = ks.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value") } - value, err = vs.StoredValue(i.storage) + value, err = vs.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } + i.setMutationCallback(key, value) + return key, value, nil } @@ -4345,9 +5820,9 @@ func (i *MapIterator) Next() (key Value, value Value, err error) { return i.Next() } -func (i *MapIterator) NextKey() (key Value, err error) { +func (i *readOnlyMapIterator) NextKey() (key Value, err error) { if i.elemIterator == nil { - if i.id == SlabIDUndefined { + if i.nextDataSlabID == SlabIDUndefined { return nil, nil } @@ -4359,18 +5834,20 @@ func (i *MapIterator) NextKey() (key Value, err error) { } var ks Storable - ks, _, err = i.elemIterator.Next() + ks, _, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). return nil, err } if ks != nil { - key, err = ks.StoredValue(i.storage) + key, err = ks.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map key's stored value") } + i.setMutationCallback(key, nil) + return key, nil } @@ -4380,9 +5857,9 @@ func (i *MapIterator) NextKey() (key Value, err error) { return i.NextKey() } -func (i *MapIterator) NextValue() (value Value, err error) { +func (i *readOnlyMapIterator) NextValue() (value Value, err error) { if i.elemIterator == nil { - if i.id == SlabIDUndefined { + if i.nextDataSlabID == SlabIDUndefined { return nil, nil } @@ -4394,18 +5871,20 @@ func (i *MapIterator) NextValue() (value Value, err error) { } var vs Storable - _, vs, err = i.elemIterator.Next() + _, vs, err = i.elemIterator.next() if err != nil { // Don't need to wrap error as external error because err is already categorized by MapElementIterator.Next(). return nil, err } if vs != nil { - value, err = vs.StoredValue(i.storage) + value, err = vs.StoredValue(i.m.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get map value's stored value") } + i.setMutationCallback(nil, value) + return value, nil } @@ -4415,58 +5894,130 @@ func (i *MapIterator) NextValue() (value Value, err error) { return i.NextValue() } -func (i *MapIterator) advance() error { - slab, found, err := i.storage.Retrieve(i.id) +func (i *readOnlyMapIterator) advance() error { + slab, found, err := i.m.Storage.Retrieve(i.nextDataSlabID) if err != nil { // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.id)) + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", i.nextDataSlabID)) } if !found { - return NewSlabNotFoundErrorf(i.id, "slab not found during map iteration") + return NewSlabNotFoundErrorf(i.nextDataSlabID, "slab not found during map iteration") } dataSlab, ok := slab.(*MapDataSlab) if !ok { - return NewSlabDataErrorf("slab %s isn't MapDataSlab", i.id) + return NewSlabDataErrorf("slab %s isn't MapDataSlab", i.nextDataSlabID) } - i.id = dataSlab.next + i.nextDataSlabID = dataSlab.next - i.elemIterator = &MapElementIterator{ - storage: i.storage, + i.elemIterator = &mapElementIterator{ + storage: i.m.Storage, elements: dataSlab.elements, } return nil } -func (m *OrderedMap) Iterator() (*MapIterator, error) { - slab, err := firstMapDataSlab(m.Storage, m.root) +func (i *readOnlyMapIterator) CanMutate() bool { + return false +} + +// Iterator returns mutable iterator for map elements. +// Mutable iterator handles: +// - indirect element mutation, such as modifying nested container +// - direct element mutation, such as overwriting existing element with new element +// Mutable iterator doesn't handle: +// - inserting new elements into the map +// - removing existing elements from the map +// NOTE: Use readonly iterator if mutation is not needed for better performance. +func (m *OrderedMap) Iterator(comparator ValueComparator, hip HashInputProvider) (MapIterator, error) { + if m.Count() == 0 { + return emptyMutableMapIterator, nil + } + + keyStorable, err := firstKeyInMapSlab(m.Storage, m.root) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by firstKeyInMapSlab(). + return nil, err + } + + if keyStorable == nil { + // This should never happen because m.Count() > 0. 
+ return nil, NewSlabDataErrorf("failed to find first key in map while map count > 0") + } + + key, err := keyStorable.StoredValue(m.Storage) + if err != nil { + return nil, err + } + + return &mutableMapIterator{ + m: m, + comparator: comparator, + hip: hip, + nextKey: key, + }, nil +} + +// ReadOnlyIterator returns readonly iterator for map elements. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use ReadOnlyIteratorWithMutationCallback(). +func (m *OrderedMap) ReadOnlyIterator() (MapIterator, error) { + return m.ReadOnlyIteratorWithMutationCallback(nil, nil) +} + +// ReadOnlyIteratorWithMutationCallback returns readonly iterator for map elements. +// keyMutatinCallback and valueMutationCallback are useful for logging, etc. with +// more context when mutation occurs. Mutation handling here is the same with or +// without these callbacks. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - keyMutatinCallback and valueMutationCallback are called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use ReadOnlyIterator(). +func (m *OrderedMap) ReadOnlyIteratorWithMutationCallback( + keyMutatinCallback ReadOnlyMapIteratorMutationCallback, + valueMutationCallback ReadOnlyMapIteratorMutationCallback, +) (MapIterator, error) { + if m.Count() == 0 { + return emptyReadOnlyMapIterator, nil + } + + dataSlab, err := firstMapDataSlab(m.Storage, m.root) if err != nil { // Don't need to wrap error as external error because err is already categorized by firstMapDataSlab(). 
return nil, err } - dataSlab := slab.(*MapDataSlab) + if keyMutatinCallback == nil { + keyMutatinCallback = defaultReadOnlyMapIteratorMutatinCallback + } + + if valueMutationCallback == nil { + valueMutationCallback = defaultReadOnlyMapIteratorMutatinCallback + } - return &MapIterator{ - storage: m.Storage, - id: dataSlab.next, - elemIterator: &MapElementIterator{ + return &readOnlyMapIterator{ + m: m, + nextDataSlabID: dataSlab.next, + elemIterator: &mapElementIterator{ storage: m.Storage, elements: dataSlab.elements, }, + keyMutationCallback: keyMutatinCallback, + valueMutationCallback: valueMutationCallback, }, nil } -func (m *OrderedMap) Iterate(fn MapEntryIterationFunc) error { - - iterator, err := m.Iterator() - if err != nil { - // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). - return err - } - +func iterateMap(iterator MapIterator, fn MapEntryIterationFunc) error { + var err error var key, value Value for { key, value, err = iterator.Next() @@ -4488,14 +6039,54 @@ func (m *OrderedMap) Iterate(fn MapEntryIterationFunc) error { } } -func (m *OrderedMap) IterateKeys(fn MapElementIterationFunc) error { - - iterator, err := m.Iterator() +func (m *OrderedMap) Iterate(comparator ValueComparator, hip HashInputProvider, fn MapEntryIterationFunc) error { + iterator, err := m.Iterator(comparator, hip) if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). return err } + return iterateMap(iterator, fn) +} + +// IterateReadOnly iterates readonly map elements. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyWithMutationCallback(). 
+func (m *OrderedMap) IterateReadOnly( + fn MapEntryIterationFunc, +) error { + return m.IterateReadOnlyWithMutationCallback(fn, nil, nil) +} + +// IterateReadOnlyWithMutationCallback iterates readonly map elements. +// keyMutatinCallback and valueMutationCallback are useful for logging, etc. with +// more context when mutation occurs. Mutation handling here is the same with or +// without these callbacks. +// If elements are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - keyMutatinCallback/valueMutationCallback is called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use IterateReadOnly(). +func (m *OrderedMap) IterateReadOnlyWithMutationCallback( + fn MapEntryIterationFunc, + keyMutatinCallback ReadOnlyMapIteratorMutationCallback, + valueMutationCallback ReadOnlyMapIteratorMutationCallback, +) error { + iterator, err := m.ReadOnlyIteratorWithMutationCallback(keyMutatinCallback, valueMutationCallback) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator(). + return err + } + return iterateMap(iterator, fn) +} +func iterateMapKeys(iterator MapIterator, fn MapElementIterationFunc) error { + var err error var key Value for { key, err = iterator.NextKey() @@ -4517,14 +6108,53 @@ func (m *OrderedMap) IterateKeys(fn MapElementIterationFunc) error { } } -func (m *OrderedMap) IterateValues(fn MapElementIterationFunc) error { - - iterator, err := m.Iterator() +func (m *OrderedMap) IterateKeys(comparator ValueComparator, hip HashInputProvider, fn MapElementIterationFunc) error { + iterator, err := m.Iterator(comparator, hip) if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). 
return err } + return iterateMapKeys(iterator, fn) +} + +// IterateReadOnlyKeys iterates readonly map keys. +// If keys are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of key containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyKeysWithMutationCallback(). +func (m *OrderedMap) IterateReadOnlyKeys( + fn MapElementIterationFunc, +) error { + return m.IterateReadOnlyKeysWithMutationCallback(fn, nil) +} + +// IterateReadOnlyKeysWithMutationCallback iterates readonly map keys. +// keyMutatinCallback is useful for logging, etc. with more context +// when mutation occurs. Mutation handling here is the same with or +// without this callback. +// If keys are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of key containers return ReadOnlyIteratorElementMutationError. +// - keyMutatinCallback is called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use IterateReadOnlyKeys(). +func (m *OrderedMap) IterateReadOnlyKeysWithMutationCallback( + fn MapElementIterationFunc, + keyMutatinCallback ReadOnlyMapIteratorMutationCallback, +) error { + iterator, err := m.ReadOnlyIteratorWithMutationCallback(keyMutatinCallback, nil) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator(). 
+ return err + } + return iterateMapKeys(iterator, fn) +} +func iterateMapValues(iterator MapIterator, fn MapElementIterationFunc) error { + var err error var value Value for { value, err = iterator.NextValue() @@ -4546,6 +6176,51 @@ func (m *OrderedMap) IterateValues(fn MapElementIterationFunc) error { } } +func (m *OrderedMap) IterateValues(comparator ValueComparator, hip HashInputProvider, fn MapElementIterationFunc) error { + iterator, err := m.Iterator(comparator, hip) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.Iterator(). + return err + } + return iterateMapValues(iterator, fn) +} + +// IterateReadOnlyValues iterates readonly map values. +// If values are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback is needed (e.g. for logging mutation, etc.), use IterateReadOnlyValuesWithMutationCallback(). +func (m *OrderedMap) IterateReadOnlyValues( + fn MapElementIterationFunc, +) error { + return m.IterateReadOnlyValuesWithMutationCallback(fn, nil) +} + +// IterateReadOnlyValuesWithMutationCallback iterates readonly map values. +// valueMutationCallback is useful for logging, etc. with more context +// when mutation occurs. Mutation handling here is the same with or +// without this callback. +// If values are mutated: +// - those changes are not guaranteed to persist. +// - mutation functions of child containers return ReadOnlyIteratorElementMutationError. +// - keyMutatinCallback is called if provided +// NOTE: +// Use readonly iterator if mutation is not needed for better performance. +// If callback isn't needed, use IterateReadOnlyValues(). 
+func (m *OrderedMap) IterateReadOnlyValuesWithMutationCallback( + fn MapElementIterationFunc, + valueMutationCallback ReadOnlyMapIteratorMutationCallback, +) error { + iterator, err := m.ReadOnlyIteratorWithMutationCallback(nil, valueMutationCallback) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by OrderedMap.ReadOnlyIterator(). + return err + } + return iterateMapValues(iterator, fn) +} + type MapPopIterationFunc func(Storable, Storable) // PopIterate iterates and removes elements backward. @@ -4564,22 +6239,32 @@ func (m *OrderedMap) PopIterate(fn MapPopIterationFunc) error { extraData := m.root.ExtraData() extraData.Count = 0 + inlined := m.root.Inlined() + + prefixSize := uint32(mapRootDataSlabPrefixSize) + if inlined { + prefixSize = uint32(inlinedMapDataSlabPrefixSize) + } + // Set root to empty data slab m.root = &MapDataSlab{ header: MapSlabHeader{ slabID: rootID, - size: mapRootDataSlabPrefixSize + hkeyElementsPrefixSize, + size: prefixSize + hkeyElementsPrefixSize, }, elements: newHkeyElements(0), extraData: extraData, + inlined: inlined, } - // Save root slab - err = m.Storage.Store(m.root.SlabID(), m.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + if !m.Inlined() { + // Save root slab + err = storeSlab(m.Storage, m.root) + if err != nil { + return err + } } + return nil } @@ -4672,12 +6357,12 @@ func NewMapFromBatchData( prevElem := elements.elems[lastElementIndex] prevElemSize := prevElem.Size() - elem, existingValue, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value) + elem, _, existingMapValueStorable, err := prevElem.Set(storage, address, digesterBuilder, digester, 0, hkey, comparator, hip, key, value) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Set(). return nil, err } - if existingValue != nil { + if existingMapValueStorable != nil { return nil, NewDuplicateKeyError(key) } @@ -4805,10 +6490,9 @@ func NewMapFromBatchData( // Store all slabs for _, slab := range slabs { - err = storage.Store(slab.SlabID(), slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", slab.SlabID())) + return nil, err } } @@ -4835,10 +6519,9 @@ func NewMapFromBatchData( root.SetExtraData(extraData) // Store root - err = storage.Store(root.SlabID(), root) + err = storeSlab(storage, root) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", root.SlabID())) + return nil, err } return &OrderedMap{ @@ -5132,8 +6815,8 @@ func (i *MapLoadedValueIterator) Next() (Value, Value, error) { return nil, nil, nil } -// LoadedValueIterator returns iterator to iterate loaded map elements. 
-func (m *OrderedMap) LoadedValueIterator() (*MapLoadedValueIterator, error) { +// ReadOnlyLoadedValueIterator returns iterator to iterate loaded map elements. +func (m *OrderedMap) ReadOnlyLoadedValueIterator() (*MapLoadedValueIterator, error) { switch slab := m.root.(type) { case *MapDataSlab: @@ -5171,9 +6854,9 @@ func (m *OrderedMap) LoadedValueIterator() (*MapLoadedValueIterator, error) { } } -// IterateLoadedValues iterates loaded map values. -func (m *OrderedMap) IterateLoadedValues(fn MapEntryIterationFunc) error { - iterator, err := m.LoadedValueIterator() +// IterateReadOnlyLoadedValues iterates loaded map values. +func (m *OrderedMap) IterateReadOnlyLoadedValues(fn MapEntryIterationFunc) error { + iterator, err := m.ReadOnlyLoadedValueIterator() if err != nil { // Don't need to wrap error as external error because err is already categorized by OrderedMap.LoadedValueIterator(). return err @@ -5199,3 +6882,49 @@ func (m *OrderedMap) IterateLoadedValues(fn MapEntryIterationFunc) error { } } } + +func firstKeyInMapSlab(storage SlabStorage, slab MapSlab) (MapKey, error) { + dataSlab, err := firstMapDataSlab(storage, slab) + if err != nil { + return nil, err + } + return firstKeyInElements(storage, dataSlab.elements) +} + +func firstKeyInElements(storage SlabStorage, elems elements) (MapKey, error) { + switch elements := elems.(type) { + case *hkeyElements: + if len(elements.elems) == 0 { + return nil, nil + } + firstElem := elements.elems[0] + return firstKeyInElement(storage, firstElem) + + case *singleElements: + if len(elements.elems) == 0 { + return nil, nil + } + firstElem := elements.elems[0] + return firstElem.key, nil + + default: + return nil, NewUnreachableError() + } +} + +func firstKeyInElement(storage SlabStorage, elem element) (MapKey, error) { + switch elem := elem.(type) { + case *singleElement: + return elem.key, nil + + case elementGroup: + group, err := elem.Elements(storage) + if err != nil { + return nil, err + } + return 
firstKeyInElements(storage, group) + + default: + return nil, NewUnreachableError() + } +} diff --git a/map_debug.go b/map_debug.go index 915b003..7e5aea8 100644 --- a/map_debug.go +++ b/map_debug.go @@ -66,53 +66,58 @@ func GetMapStats(m *OrderedMap) (MapStats, error) { return MapStats{}, err } - if slab.IsData() { + switch slab := slab.(type) { + case *MapDataSlab: dataSlabCount++ - leaf := slab.(*MapDataSlab) - elementGroups := []elements{leaf.elements} + elementGroups := []elements{slab.elements} for len(elementGroups) > 0 { var nestedElementGroups []elements - for i := 0; i < len(elementGroups); i++ { - - elems := elementGroups[i] - - for j := 0; j < int(elems.Count()); j++ { - elem, err := elems.Element(j) + for _, group := range elementGroups { + for i := 0; i < int(group.Count()); i++ { + elem, err := group.Element(i) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Element(). return MapStats{}, err } - if group, ok := elem.(elementGroup); ok { - if !group.Inline() { + switch e := elem.(type) { + case elementGroup: + nestedGroup := e + + if !nestedGroup.Inline() { collisionDataSlabCount++ } - nested, err := group.Elements(m.Storage) + nested, err := nestedGroup.Elements(m.Storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by elementGroup.Elements(). 
return MapStats{}, err } + nestedElementGroups = append(nestedElementGroups, nested) - } else { - e := elem.(*singleElement) + case *singleElement: if _, ok := e.key.(SlabIDStorable); ok { storableDataSlabCount++ } if _, ok := e.value.(SlabIDStorable); ok { storableDataSlabCount++ } + // This handles use case of inlined array or map value containing SlabID + ids := getSlabIDFromStorable(e.value, nil) + storableDataSlabCount += uint64(len(ids)) } } } + elementGroups = nestedElementGroups } - } else { + + case *MapMetaDataSlab: metaDataSlabCount++ for _, storable := range slab.ChildStorables() { @@ -170,12 +175,12 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { return nil, err } - if slab.IsData() { - dataSlab := slab.(*MapDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab)) + switch slab := slab.(type) { + case *MapDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) - for i := 0; i < int(dataSlab.elements.Count()); i++ { - elem, err := dataSlab.elements.Element(i) + for i := 0; i < int(slab.elements.Count()); i++ { + elem, err := slab.elements.Element(i) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Element(). 
return nil, err @@ -188,16 +193,10 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { } } - childStorables := dataSlab.ChildStorables() - for _, e := range childStorables { - if id, ok := e.(SlabIDStorable); ok { - overflowIDs = append(overflowIDs, SlabID(id)) - } - } + overflowIDs = getSlabIDFromStorable(slab, overflowIDs) - } else { - meta := slab.(*MapMetaDataSlab) - dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, meta)) + case *MapMetaDataSlab: + dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, slab)) for _, storable := range slab.ChildStorables() { id, ok := storable.(SlabIDStorable) @@ -247,8 +246,30 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { return dumps, nil } -func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func VerifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool) error { + return verifyMap(m, address, typeInfo, tic, hip, inlineEnabled, map[SlabID]struct{}{}) +} + +func verifyMap(m *OrderedMap, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error { + + // Verify map address (independent of array inlined status) + if address != m.Address() { + return NewFatalError(fmt.Errorf("map address %v, got %v", address, m.Address())) + } + + // Verify map value ID (independent of array inlined status) + err := verifyMapValueID(m) + if err != nil { + return err + } + // Verify map slab ID (dependent of array inlined status) + err = verifyMapSlabID(m) + if err != nil { + return err + } + + // Verify map extra data extraData := m.root.ExtraData() if extraData == nil { return NewFatalError(fmt.Errorf("root slab %d doesn't have extra data", m.root.SlabID())) @@ -270,10 +291,19 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash return NewFatalError(fmt.Errorf("root slab %d seed is 
uninitialized", m.root.SlabID())) } - computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := validMapSlab( - m.Storage, m.digesterBuilder, tic, hip, m.root.SlabID(), 0, nil, []SlabID{}, []SlabID{}, []Digest{}) + v := &mapVerifier{ + storage: m.Storage, + address: address, + digesterBuilder: m.digesterBuilder, + tic: tic, + hip: hip, + inlineEnabled: inlineEnabled, + } + + computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := v.verifySlab( + m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). return err } @@ -315,17 +345,23 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash return nil } -func validMapSlab( - storage SlabStorage, - digesterBuilder DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, - id SlabID, +type mapVerifier struct { + storage SlabStorage + address Address + digesterBuilder DigesterBuilder + tic TypeInfoComparator + hip HashInputProvider + inlineEnabled bool +} + +func (v *mapVerifier) verifySlab( + slab MapSlab, level int, headerFromParentSlab *MapSlabHeader, dataSlabIDs []SlabID, nextDataSlabIDs []SlabID, firstKeys []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, _dataSlabIDs []SlabID, @@ -334,10 +370,30 @@ func validMapSlab( err error, ) { - slab, err := getMapSlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). 
- return 0, nil, nil, nil, err + id := slab.Header().slabID + + // Verify SlabID is unique + if _, exist := slabIDs[id]; exist { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("found duplicate slab ID %s", id)) + } + + slabIDs[id] = struct{}{} + + // Verify slab address (independent of map inlined status) + if v.address != id.address { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("map slab address %v, got %v", v.address, id.address)) + } + + // Verify that inlined slab is not in storage + if slab.Inlined() { + _, exist, err := v.storage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storage interface. + return 0, nil, nil, nil, wrapErrorAsExternalErrorIfNeeded(err) + } + if exist { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s is in storage", id)) + } } if level > 0 { @@ -367,89 +423,155 @@ func validMapSlab( } } - if slab.IsData() { + switch slab := slab.(type) { + case *MapDataSlab: + return v.verifyDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) - dataSlab, ok := slab.(*MapDataSlab) - if !ok { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("slab %d is not MapDataSlab", id)) - } + case *MapMetaDataSlab: + return v.verifyMetaDataSlab(slab, level, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) - // Verify data slab's elements - elementCount, elementSize, err := validMapElements(storage, digesterBuilder, tic, hip, id, dataSlab.elements, 0, nil) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapElements(). 
- return 0, nil, nil, nil, err - } + default: + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) + } +} - // Verify slab's first key - if dataSlab.elements.firstKey() != dataSlab.header.firstKey { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d header first key %d is wrong, want %d", - id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) - } +func (v *mapVerifier) verifyDataSlab( + dataSlab *MapDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + firstKeys []Digest, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint64, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + _firstKeys []Digest, + err error, +) { + id := dataSlab.header.slabID - // Verify that aggregated element size + slab prefix is the same as header.size - computedSize := uint32(mapDataSlabPrefixSize) - if level == 0 { - computedSize = uint32(mapRootDataSlabPrefixSize) - } - computedSize += elementSize + if !dataSlab.IsData() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapDataSlab %s is not data", id)) + } - if computedSize != dataSlab.header.size { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d header size %d is wrong, want %d", - id, dataSlab.header.size, computedSize)) - } + // Verify data slab's elements + elementCount, elementSize, err := v.verifyElements(id, dataSlab.elements, 0, nil, slabIDs) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyElements(). 
+ return 0, nil, nil, nil, err + } - // Verify any size flag - if dataSlab.anySize { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d anySize %t is wrong, want false", - id, dataSlab.anySize)) + // Verify slab's first key + if dataSlab.elements.firstKey() != dataSlab.header.firstKey { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d header first key %d is wrong, want %d", + id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) + } + + // Verify that only root slab can be inlined + if dataSlab.Inlined() { + if level > 0 { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + if dataSlab.extraData == nil { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s doesn't have extra data", id)) + } + if dataSlab.next != SlabIDUndefined { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s has next slab ID", id)) } + } - // Verify collision group flag - if dataSlab.collisionGroup { - return 0, nil, nil, nil, NewFatalError( - fmt.Errorf("data slab %d collisionGroup %t is wrong, want false", - id, dataSlab.collisionGroup)) + // Verify that aggregated element size + slab prefix is the same as header.size + computedSize := uint32(mapDataSlabPrefixSize) + if level == 0 { + computedSize = uint32(mapRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = uint32(inlinedMapDataSlabPrefixSize) } + } + computedSize += elementSize - dataSlabIDs = append(dataSlabIDs, id) + if computedSize != dataSlab.header.size { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d header size %d is wrong, want %d", + id, dataSlab.header.size, computedSize)) + } - if dataSlab.next != SlabIDUndefined { - nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) - } + // Verify any size flag + if dataSlab.anySize { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d anySize %t is wrong, want false", + id, dataSlab.anySize)) + } - firstKeys = 
append(firstKeys, dataSlab.header.firstKey) + // Verify collision group flag + if dataSlab.collisionGroup { + return 0, nil, nil, nil, NewFatalError( + fmt.Errorf("data slab %d collisionGroup %t is wrong, want false", + id, dataSlab.collisionGroup)) + } - return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil + dataSlabIDs = append(dataSlabIDs, id) + + if dataSlab.next != SlabIDUndefined { + nextDataSlabIDs = append(nextDataSlabIDs, dataSlab.next) } - meta, ok := slab.(*MapMetaDataSlab) - if !ok { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("slab %d is not MapMetaDataSlab", id)) + firstKeys = append(firstKeys, dataSlab.header.firstKey) + + return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil +} + +func (v *mapVerifier) verifyMetaDataSlab( + metaSlab *MapMetaDataSlab, + level int, + dataSlabIDs []SlabID, + nextDataSlabIDs []SlabID, + firstKeys []Digest, + slabIDs map[SlabID]struct{}, +) ( + elementCount uint64, + _dataSlabIDs []SlabID, + _nextDataSlabIDs []SlabID, + _firstKeys []Digest, + err error, +) { + id := metaSlab.header.slabID + + if metaSlab.IsData() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapMetaDataSlab %s is data", id)) + } + + if metaSlab.Inlined() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("MapMetaDataSlab %s can't be inlined", id)) } if level == 0 { // Verify that root slab has more than one child slabs - if len(meta.childrenHeaders) < 2 { + if len(metaSlab.childrenHeaders) < 2 { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("root metadata slab %d has %d children, want at least 2 children ", - id, len(meta.childrenHeaders))) + id, len(metaSlab.childrenHeaders))) } } elementCount = 0 - for i := 0; i < len(meta.childrenHeaders); i++ { - h := meta.childrenHeaders[i] + for i := 0; i < len(metaSlab.childrenHeaders); i++ { + h := metaSlab.childrenHeaders[i] + + childSlab, err := getMapSlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is 
already categorized by getMapSlab(). + return 0, nil, nil, nil, err + } // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - validMapSlab(storage, digesterBuilder, tic, hip, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + v.verifySlab(childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlab(). + // Don't need to wrap error as external error because err is already categorized by verifySlab(). return 0, nil, nil, nil, err } @@ -457,53 +579,50 @@ func validMapSlab( } // Verify slab header first key - if meta.childrenHeaders[0].firstKey != meta.header.firstKey { + if metaSlab.childrenHeaders[0].firstKey != metaSlab.header.firstKey { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d header first key %d is wrong, want %d", - id, meta.header.firstKey, meta.childrenHeaders[0].firstKey)) + id, metaSlab.header.firstKey, metaSlab.childrenHeaders[0].firstKey)) } // Verify that child slab's first keys are sorted. - sortedHKey := sort.SliceIsSorted(meta.childrenHeaders, func(i, j int) bool { - return meta.childrenHeaders[i].firstKey < meta.childrenHeaders[j].firstKey + sortedHKey := sort.SliceIsSorted(metaSlab.childrenHeaders, func(i, j int) bool { + return metaSlab.childrenHeaders[i].firstKey < metaSlab.childrenHeaders[j].firstKey }) if !sortedHKey { - return 0, nil, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d child slab's first key isn't sorted %+v", id, meta.childrenHeaders)) + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("metadata slab %d child slab's first key isn't sorted %+v", id, metaSlab.childrenHeaders)) } // Verify that child slab's first keys are unique. 
- if len(meta.childrenHeaders) > 1 { - prev := meta.childrenHeaders[0].firstKey - for _, h := range meta.childrenHeaders[1:] { + if len(metaSlab.childrenHeaders) > 1 { + prev := metaSlab.childrenHeaders[0].firstKey + for _, h := range metaSlab.childrenHeaders[1:] { if prev == h.firstKey { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d child header first key isn't unique %v", - id, meta.childrenHeaders)) + id, metaSlab.childrenHeaders)) } prev = h.firstKey } } // Verify slab header's size - computedSize := uint32(len(meta.childrenHeaders)*mapSlabHeaderSize) + mapMetaDataSlabPrefixSize - if computedSize != meta.header.size { + computedSize := uint32(len(metaSlab.childrenHeaders)*mapSlabHeaderSize) + mapMetaDataSlabPrefixSize + if computedSize != metaSlab.header.size { return 0, nil, nil, nil, NewFatalError( fmt.Errorf("metadata slab %d header size %d is wrong, want %d", - id, meta.header.size, computedSize)) + id, metaSlab.header.size, computedSize)) } return elementCount, dataSlabIDs, nextDataSlabIDs, firstKeys, nil } -func validMapElements( - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyElements( id SlabID, elements elements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -512,23 +631,20 @@ func validMapElements( switch elems := elements.(type) { case *hkeyElements: - return validMapHkeyElements(storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return v.verifyHkeyElements(id, elems, digestLevel, hkeyPrefixes, slabIDs) case *singleElements: - return validMapSingleElements(storage, db, tic, hip, id, elems, digestLevel, hkeyPrefixes) + return v.verifySingleElements(id, elems, digestLevel, hkeyPrefixes, slabIDs) default: return 0, 0, NewFatalError(fmt.Errorf("slab %d has unknown elements type %T at digest level %d", id, elements, digestLevel)) } } -func validMapHkeyElements( - storage 
SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifyHkeyElements( id SlabID, elements *hkeyElements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -572,6 +688,10 @@ func validMapHkeyElements( for i := 0; i < len(elements.elems); i++ { e := elements.elems[i] + hkeys := make([]Digest, len(hkeyPrefixes)+1) + copy(hkeys, hkeyPrefixes) + hkeys[len(hkeys)-1] = elements.hkeys[i] + elementSize += digestSize // Verify element size is <= inline size @@ -583,21 +703,17 @@ func validMapHkeyElements( } } - if group, ok := e.(elementGroup); ok { - - ge, err := group.Elements(storage) + switch e := e.(type) { + case elementGroup: + group, err := e.Elements(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by elementGroup.Elements(). return 0, 0, err } - hkeys := make([]Digest, len(hkeyPrefixes)+1) - copy(hkeys, hkeyPrefixes) - hkeys[len(hkeys)-1] = elements.hkeys[i] - - count, size, err := validMapElements(storage, db, tic, hip, id, ge, digestLevel+1, hkeys) + count, size, err := v.verifyElements(id, group, digestLevel+1, hkeys, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapElement(). + // Don't need to wrap error as external error because err is already categorized by verifyElements(). 
return 0, 0, err } @@ -616,21 +732,11 @@ func validMapHkeyElements( elementCount += count - } else { - - se, ok := e.(*singleElement) - if !ok { - return 0, 0, NewFatalError(fmt.Errorf("data slab %d element type %T is wrong, want *singleElement", id, e)) - } - - hkeys := make([]Digest, len(hkeyPrefixes)+1) - copy(hkeys, hkeyPrefixes) - hkeys[len(hkeys)-1] = elements.hkeys[i] - + case *singleElement: // Verify element - computedSize, maxDigestLevel, err := validSingleElement(storage, db, tic, hip, se, hkeys) + computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeys, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validSingleElement(). + // Don't need to wrap error as external error because err is already categorized by verifySingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) } @@ -644,6 +750,9 @@ func validMapHkeyElements( elementSize += computedSize elementCount++ + + default: + return 0, 0, NewFatalError(fmt.Errorf("data slab %d element type %T is wrong, want either elementGroup or *singleElement", id, e)) } } @@ -655,15 +764,12 @@ func validMapHkeyElements( return elementCount, elementSize, nil } -func validMapSingleElements( - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifySingleElements( id SlabID, elements *singleElements, digestLevel uint, hkeyPrefixes []Digest, + slabIDs map[SlabID]struct{}, ) ( elementCount uint64, elementSize uint32, @@ -682,9 +788,9 @@ func validMapSingleElements( for _, e := range elements.elems { // Verify element - computedSize, maxDigestLevel, err := validSingleElement(storage, db, tic, hip, e, hkeyPrefixes) + computedSize, maxDigestLevel, err := v.verifySingleElement(e, hkeyPrefixes, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validSingleElement(). 
+ // Don't need to wrap error as external error because err is already categorized by verifySingleElement(). return 0, 0, fmt.Errorf("data slab %d: %w", id, err) } @@ -713,42 +819,80 @@ func validMapSingleElements( return uint64(len(elements.elems)), elementSize, nil } -func validSingleElement( - storage SlabStorage, - db DigesterBuilder, - tic TypeInfoComparator, - hip HashInputProvider, +func (v *mapVerifier) verifySingleElement( e *singleElement, digests []Digest, + slabIDs map[SlabID]struct{}, ) ( size uint32, digestMaxLevel uint, err error, ) { + // Verify key storable's size is less than size limit + if e.key.ByteSize() > uint32(maxInlineMapKeySize) { + return 0, 0, NewFatalError( + fmt.Errorf( + "map element key %s size %d exceeds size limit %d", + e.key, e.key.ByteSize(), maxInlineMapKeySize, + )) + } + + // Verify value storable's size is less than size limit + valueSizeLimit := maxInlineMapValueSize(uint64(e.key.ByteSize())) + if e.value.ByteSize() > uint32(valueSizeLimit) { + return 0, 0, NewFatalError( + fmt.Errorf( + "map element value %s size %d exceeds size limit %d", + e.value, e.value.ByteSize(), valueSizeLimit, + )) + } // Verify key - kv, err := e.key.StoredValue(storage) + kv, err := e.key.StoredValue(v.storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Stroable interface. return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s key can't be converted to value", e)) } - err = ValidValue(kv, nil, tic, hip) + err = verifyValue(kv, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValue(). + // Don't need to wrap error as external error because err is already categorized by verifyValue(). 
return 0, 0, fmt.Errorf("element %s key isn't valid: %w", e, err) } // Verify value - vv, err := e.value.StoredValue(storage) + vv, err := e.value.StoredValue(v.storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Stroable interface. return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("element %s value can't be converted to value", e)) } - err = ValidValue(vv, nil, tic, hip) + switch e := e.value.(type) { + case SlabIDStorable: + // Verify not-inlined value > inline size, or can't be inlined + if v.inlineEnabled { + err = verifyNotInlinedValueStatusAndSize(vv, uint32(valueSizeLimit)) + if err != nil { + return 0, 0, err + } + } + + case *ArrayDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, 0, NewFatalError(fmt.Errorf("inlined array inlined status is false")) + } + + case *MapDataSlab: + // Verify inlined element's inlined status + if !e.Inlined() { + return 0, 0, NewFatalError(fmt.Errorf("inlined map inlined status is false")) + } + } + + err = verifyValue(vv, v.address, nil, v.tic, v.hip, v.inlineEnabled, slabIDs) if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValue(). + // Don't need to wrap error as external error because err is already categorized by verifyValue(). return 0, 0, fmt.Errorf("element %s value isn't valid: %w", e, err) } @@ -759,7 +903,7 @@ func validSingleElement( } // Verify digest - digest, err := db.Digest(hip, kv) + digest, err := v.digesterBuilder.Digest(v.hip, kv) if err != nil { // Wrap err as external error (if needed) because err is returned by DigesterBuilder interface. 
return 0, 0, wrapErrorfAsExternalErrorIfNeeded(err, "failed to create digester") @@ -778,21 +922,21 @@ func validSingleElement( return computedSize, digest.Levels(), nil } -func ValidValue(value Value, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider) error { +func verifyValue(value Value, address Address, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInputProvider, inlineEnabled bool, slabIDs map[SlabID]struct{}) error { switch v := value.(type) { case *Array: - return ValidArray(v, typeInfo, tic, hip) + return verifyArray(v, address, typeInfo, tic, hip, inlineEnabled, slabIDs) case *OrderedMap: - return ValidMap(v, typeInfo, tic, hip) + return verifyMap(v, address, typeInfo, tic, hip, inlineEnabled, slabIDs) } return nil } -// ValidMapSerialization traverses ordered map tree and verifies serialization +// VerifyMapSerialization traverses ordered map tree and verifies serialization // by encoding, decoding, and re-encoding slabs. // It compares in-memory objects of original slab with decoded slab. // It also compares encoded data of original slab with encoded data of decoded slab. -func ValidMapSerialization( +func VerifyMapSerialization( m *OrderedMap, cborDecMode cbor.DecMode, cborEncMode cbor.EncMode, @@ -800,159 +944,148 @@ func ValidMapSerialization( decodeTypeInfo TypeInfoDecoder, compare StorableComparator, ) error { - return validMapSlabSerialization( - m.Storage, - m.root.SlabID(), - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + // Skip verification of inlined map serialization. 
+ if m.Inlined() { + return nil + } + + v := &serializationVerifier{ + storage: m.Storage, + cborDecMode: cborDecMode, + cborEncMode: cborEncMode, + decodeStorable: decodeStorable, + decodeTypeInfo: decodeTypeInfo, + compare: compare, + } + return v.verifyMapSlab(m.root) } -func validMapSlabSerialization( - storage SlabStorage, - id SlabID, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) verifyMapSlab(slab MapSlab) error { - slab, err := getMapSlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return err - } + id := slab.SlabID() // Encode slab - data, err := Encode(slab, cborEncMode) + data, err := EncodeSlab(slab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). return err } // Decode encoded slab - decodedSlab, err := DecodeSlab(id, data, cborDecMode, decodeStorable, decodeTypeInfo) + decodedSlab, err := DecodeSlab(id, data, v.cborDecMode, v.decodeStorable, v.decodeTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by DecodeSlab(). return err } // Re-encode decoded slab - dataFromDecodedSlab, err := Encode(decodedSlab, cborEncMode) + dataFromDecodedSlab, err := EncodeSlab(decodedSlab, v.cborEncMode) if err != nil { // Don't need to wrap error as external error because err is already categorized by Encode(). 
return err } + // Verify encoding is deterministic (encoded data of original slab is same as encoded data of decoded slab) + if !bytes.Equal(data, dataFromDecodedSlab) { + return NewFatalError(fmt.Errorf("encoded data of original slab %s is different from encoded data of decoded slab, got %v, want %v", + id, dataFromDecodedSlab, data)) + } + // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined compact map because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // compact map extra data section for reuse, and only compact map field + // values are encoded in non-extra data section. + // This reduces encoding size because compact map values of the same + // compact map type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined compact map by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). 
+ return err + } - if slab.Header().size != uint32(encodedSlabSize) { - return NewFatalError( - fmt.Errorf("slab %d encoded size %d != header.size %d", - id, encodedSlabSize, slab.Header().size)) - } - - // Compare encoded data of original slab with encoded data of decoded slab - if !bytes.Equal(data, dataFromDecodedSlab) { - return NewFatalError( - fmt.Errorf("slab %d encoded data is different from decoded slab's encoded data, got %v, want %v", - id, dataFromDecodedSlab, data)) - } - - if slab.IsData() { - dataSlab, ok := slab.(*MapDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not MapDataSlab", id)) + if slab.Header().size != uint32(encodedSlabSize) { + return NewFatalError( + fmt.Errorf("slab %d encoded size %d != header.size %d", + id, encodedSlabSize, slab.Header().size)) } + } + switch slab := slab.(type) { + case *MapDataSlab: decodedDataSlab, ok := decodedSlab.(*MapDataSlab) if !ok { return NewFatalError(fmt.Errorf("decoded slab %d is not MapDataSlab", id)) } // Compare slabs - err = mapDataSlabEqual( - dataSlab, - decodedDataSlab, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.mapDataSlabEqual(slab, decodedDataSlab) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapDataSlabEqual(). 
return fmt.Errorf("data slab %d round-trip serialization failed: %w", id, err) } return nil - } - metaSlab, ok := slab.(*MapMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("slab %d is not MapMetaDataSlab", id)) - } + case *MapMetaDataSlab: + decodedMetaSlab, ok := decodedSlab.(*MapMetaDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("decoded slab %d is not MapMetaDataSlab", id)) + } - decodedMetaSlab, ok := decodedSlab.(*MapMetaDataSlab) - if !ok { - return NewFatalError(fmt.Errorf("decoded slab %d is not MapMetaDataSlab", id)) - } + // Compare slabs + err = v.mapMetaDataSlabEqual(slab, decodedMetaSlab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by mapMetaDataSlabEqual(). + return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) + } - // Compare slabs - err = mapMetaDataSlabEqual(metaSlab, decodedMetaSlab) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by mapMetaDataSlabEqual(). - return fmt.Errorf("metadata slab %d round-trip serialization failed: %w", id, err) - } + for _, h := range slab.childrenHeaders { + slab, err := getMapSlab(v.storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return err + } - for _, h := range metaSlab.childrenHeaders { - // Verify child slabs - err = validMapSlabSerialization( - storage, - h.slabID, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlabSerialization(). - return err + // Verify child slabs + err = v.verifyMapSlab(slab) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by verifyMapSlab(). 
+ return err + } } - } - return nil + return nil + + default: + return NewFatalError(fmt.Errorf("MapSlab is either *MapDataSlab or *MapMetaDataSlab, got %T", slab)) + } } -func mapDataSlabEqual( - expected *MapDataSlab, - actual *MapDataSlab, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapDataSlabEqual(expected, actual *MapDataSlab) error { + + _, _, _, actualDecodedFromCompactMap := expected.canBeEncodedAsCompactMap() // Compare extra data - err := mapExtraDataEqual(expected.extraData, actual.extraData) + err := mapExtraDataEqual(expected.extraData, actual.extraData, actualDecodedFromCompactMap) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapExtraDataEqual(). return err } + // Compare inlined + if expected.inlined != actual.inlined { + return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) + } + // Compare next if expected.next != actual.next { return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next)) @@ -969,21 +1102,19 @@ func mapDataSlabEqual( } // Compare header - if !reflect.DeepEqual(expected.header, actual.header) { + if actualDecodedFromCompactMap { + if expected.header.slabID != actual.header.slabID { + return NewFatalError(fmt.Errorf("header.slabID %s is wrong, want %s", actual.header.slabID, expected.header.slabID)) + } + if expected.header.size != actual.header.size { + return NewFatalError(fmt.Errorf("header.size %d is wrong, want %d", actual.header.size, expected.header.size)) + } + } else if !reflect.DeepEqual(expected.header, actual.header) { return NewFatalError(fmt.Errorf("header %+v is wrong, want %+v", actual.header, expected.header)) } // Compare elements - err = mapElementsEqual( - expected.elements, - actual.elements, - storage, - 
cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.mapElementsEqual(expected.elements, actual.elements, actualDecodedFromCompactMap) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapElementsEqual(). return err @@ -992,16 +1123,7 @@ func mapDataSlabEqual( return nil } -func mapElementsEqual( - expected elements, - actual elements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapElementsEqual(expected, actual elements, actualDecodedFromCompactMap bool) error { switch expectedElems := expected.(type) { case *hkeyElements: @@ -1009,48 +1131,21 @@ func mapElementsEqual( if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapHkeyElementsEqual( - expectedElems, - actualElems, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapHkeyElementsEqual(expectedElems, actualElems, actualDecodedFromCompactMap) case *singleElements: actualElems, ok := actual.(*singleElements) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapSingleElementsEqual( - expectedElems, - actualElems, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapSingleElementsEqual(expectedElems, actualElems) } return nil } -func mapHkeyElementsEqual( - expected *hkeyElements, - actual *hkeyElements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapHkeyElementsEqual(expected, actual *hkeyElements, actualDecodedFromCompactMap bool) error { if expected.level != 
actual.level { return NewFatalError(fmt.Errorf("hkeyElements level %d is wrong, want %d", actual.level, expected.level)) @@ -1060,12 +1155,12 @@ func mapHkeyElementsEqual( return NewFatalError(fmt.Errorf("hkeyElements size %d is wrong, want %d", actual.size, expected.size)) } - if len(expected.hkeys) == 0 { - if len(actual.hkeys) != 0 { - return NewFatalError(fmt.Errorf("hkeyElements hkeys %v is wrong, want %v", actual.hkeys, expected.hkeys)) - } - } else { - if !reflect.DeepEqual(expected.hkeys, actual.hkeys) { + if len(expected.hkeys) != len(actual.hkeys) { + return NewFatalError(fmt.Errorf("hkeyElements hkeys len %d is wrong, want %d", len(actual.hkeys), len(expected.hkeys))) + } + + if !actualDecodedFromCompactMap { + if len(expected.hkeys) > 0 && !reflect.DeepEqual(expected.hkeys, actual.hkeys) { return NewFatalError(fmt.Errorf("hkeyElements hkeys %v is wrong, want %v", actual.hkeys, expected.hkeys)) } } @@ -1074,39 +1169,37 @@ func mapHkeyElementsEqual( return NewFatalError(fmt.Errorf("hkeyElements elems len %d is wrong, want %d", len(actual.elems), len(expected.elems))) } - for i := 0; i < len(expected.elems); i++ { - expectedEle := expected.elems[i] - actualEle := actual.elems[i] - - err := mapElementEqual( - expectedEle, - actualEle, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by mapElementEqual(). 
- return err + if actualDecodedFromCompactMap { + for _, expectedEle := range expected.elems { + found := false + for _, actualEle := range actual.elems { + err := v.mapElementEqual(expectedEle, actualEle, actualDecodedFromCompactMap) + if err == nil { + found = true + break + } + } + if !found { + return NewFatalError(fmt.Errorf("hkeyElements elem %v is not found", expectedEle)) + } + } + } else { + for i := 0; i < len(expected.elems); i++ { + expectedEle := expected.elems[i] + actualEle := actual.elems[i] + + err := v.mapElementEqual(expectedEle, actualEle, actualDecodedFromCompactMap) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by mapElementEqual(). + return err + } } } return nil } -func mapSingleElementsEqual( - expected *singleElements, - actual *singleElements, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapSingleElementsEqual(expected, actual *singleElements) error { if expected.level != actual.level { return NewFatalError(fmt.Errorf("singleElements level %d is wrong, want %d", actual.level, expected.level)) @@ -1124,16 +1217,7 @@ func mapSingleElementsEqual( expectedElem := expected.elems[i] actualElem := actual.elems[i] - err := mapSingleElementEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err := v.mapSingleElementEqual(expectedElem, actualElem) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapSingleElementEqual(). 
return err @@ -1143,16 +1227,7 @@ func mapSingleElementsEqual( return nil } -func mapElementEqual( - expected element, - actual element, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapElementEqual(expected, actual element, actualDecodedFromCompactMap bool) error { switch expectedElem := expected.(type) { case *singleElement: @@ -1160,64 +1235,27 @@ func mapElementEqual( if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapSingleElementEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapSingleElementEqual(expectedElem, actualElem) case *inlineCollisionGroup: actualElem, ok := actual.(*inlineCollisionGroup) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapElementsEqual( - expectedElem.elements, - actualElem.elements, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + return v.mapElementsEqual(expectedElem.elements, actualElem.elements, actualDecodedFromCompactMap) case *externalCollisionGroup: actualElem, ok := actual.(*externalCollisionGroup) if !ok { return NewFatalError(fmt.Errorf("elements type %T is wrong, want %T", actual, expected)) } - return mapExternalCollisionElementsEqual( - expectedElem, - actualElem, - storage, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) - + return v.mapExternalCollisionElementsEqual(expectedElem, actualElem) } return nil } -func mapExternalCollisionElementsEqual( - expected *externalCollisionGroup, - actual *externalCollisionGroup, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo 
TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapExternalCollisionElementsEqual(expected, actual *externalCollisionGroup) error { if expected.size != actual.size { return NewFatalError(fmt.Errorf("externalCollisionGroup size %d is wrong, want %d", actual.size, expected.size)) @@ -1227,100 +1265,96 @@ func mapExternalCollisionElementsEqual( return NewFatalError(fmt.Errorf("externalCollisionGroup id %d is wrong, want %d", actual.slabID, expected.slabID)) } + slab, err := getMapSlab(v.storage, expected.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return err + } + // Compare external collision slab - err := validMapSlabSerialization( - storage, - expected.slabID, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.verifyMapSlab(slab) if err != nil { - // Don't need to wrap error as external error because err is already categorized by validMapSlabSerialization(). + // Don't need to wrap error as external error because err is already categorized by verifyMapSlab(). 
return err } return nil } -func mapSingleElementEqual( - expected *singleElement, - actual *singleElement, - storage SlabStorage, - cborDecMode cbor.DecMode, - cborEncMode cbor.EncMode, - decodeStorable StorableDecoder, - decodeTypeInfo TypeInfoDecoder, - compare StorableComparator, -) error { +func (v *serializationVerifier) mapSingleElementEqual(expected, actual *singleElement) error { if expected.size != actual.size { return NewFatalError(fmt.Errorf("singleElement size %d is wrong, want %d", actual.size, expected.size)) } - if !compare(expected.key, actual.key) { + if !v.compare(expected.key, actual.key) { return NewFatalError(fmt.Errorf("singleElement key %v is wrong, want %v", actual.key, expected.key)) } // Compare key stored in a separate slab if idStorable, ok := expected.key.(SlabIDStorable); ok { - v, err := idStorable.StoredValue(storage) + value, err := idStorable.StoredValue(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). return err } - err = ValidValueSerialization( - v, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.verifyValue(value) if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValueSerialization(). + // Don't need to wrap error as external error because err is already categorized by verifyValue(). 
return err } } - if !compare(expected.value, actual.value) { - return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) - } - - // Compare value stored in a separate slab - if idStorable, ok := expected.value.(SlabIDStorable); ok { + // Compare nested element + switch ee := expected.value.(type) { + case SlabIDStorable: // Compare not-inlined element + if !v.compare(expected.value, actual.value) { + return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) + } - v, err := idStorable.StoredValue(storage) + value, err := ee.StoredValue(v.storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). return err } - err = ValidValueSerialization( - v, - cborDecMode, - cborEncMode, - decodeStorable, - decodeTypeInfo, - compare, - ) + err = v.verifyValue(value) if err != nil { - // Don't need to wrap error as external error because err is already categorized by ValidValueSerialization(). + // Don't need to wrap error as external error because err is already categorized by verifyValue(). 
return err } + + case *ArrayDataSlab: // Compare inlined array element + ae, ok := actual.value.(*ArrayDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", ae)) + } + + return v.arrayDataSlabEqual(ee, ae) + + case *MapDataSlab: // Compare inlined map element + ae, ok := actual.value.(*MapDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", ae)) + } + + return v.mapDataSlabEqual(ee, ae) + + default: + if !v.compare(expected.value, actual.value) { + return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) + } } return nil } -func mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error { +func (v *serializationVerifier) mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error { // Compare extra data - err := mapExtraDataEqual(expected.extraData, actual.extraData) + err := mapExtraDataEqual(expected.extraData, actual.extraData, false) if err != nil { // Don't need to wrap error as external error because err is already categorized by mapExtraDataEqual(). 
return err @@ -1339,7 +1373,7 @@ func mapMetaDataSlabEqual(expected, actual *MapMetaDataSlab) error { return nil } -func mapExtraDataEqual(expected, actual *MapExtraData) error { +func mapExtraDataEqual(expected, actual *MapExtraData, actualDecodedFromCompactMap bool) error { if (expected == nil) && (actual == nil) { return nil @@ -1349,8 +1383,81 @@ func mapExtraDataEqual(expected, actual *MapExtraData) error { return NewFatalError(fmt.Errorf("has extra data is %t, want %t", actual == nil, expected == nil)) } - if !reflect.DeepEqual(*expected, *actual) { - return NewFatalError(fmt.Errorf("extra data %+v is wrong, want %+v", *actual, *expected)) + if !reflect.DeepEqual(expected.TypeInfo, actual.TypeInfo) { + return NewFatalError(fmt.Errorf("map extra data type %+v is wrong, want %+v", actual.TypeInfo, expected.TypeInfo)) + } + + if expected.Count != actual.Count { + return NewFatalError(fmt.Errorf("map extra data count %d is wrong, want %d", actual.Count, expected.Count)) + } + + if !actualDecodedFromCompactMap { + if expected.Seed != actual.Seed { + return NewFatalError(fmt.Errorf("map extra data seed %d is wrong, want %d", actual.Seed, expected.Seed)) + } + } + + return nil +} + +// verifyMapValueID verifies map ValueID is always the same as +// root slab's SlabID independent of map's inlined status. 
+func verifyMapValueID(m *OrderedMap) error { + rootSlabID := m.root.Header().slabID + + vid := m.ValueID() + + if !bytes.Equal(vid[:slabAddressSize], rootSlabID.address[:]) { + return NewFatalError( + fmt.Errorf( + "expect first %d bytes of array value ID as %v, got %v", + slabAddressSize, + rootSlabID.address[:], + vid[:slabAddressSize])) + } + + if !bytes.Equal(vid[slabAddressSize:], rootSlabID.index[:]) { + return NewFatalError( + fmt.Errorf( + "expect second %d bytes of array value ID as %v, got %v", + slabIndexSize, + rootSlabID.index[:], + vid[slabAddressSize:])) + } + + return nil +} + +// verifyMapSlabID verifies map SlabID is either empty for inlined map, or +// same as root slab's SlabID for not-inlined map. +func verifyMapSlabID(m *OrderedMap) error { + sid := m.SlabID() + + if m.Inlined() { + if sid != SlabIDUndefined { + return NewFatalError( + fmt.Errorf( + "expect empty slab ID for inlined array, got %v", + sid)) + } + return nil + } + + rootSlabID := m.root.Header().slabID + + if sid == SlabIDUndefined { + return NewFatalError( + fmt.Errorf( + "expect non-empty slab ID for not-inlined array, got %v", + sid)) + } + + if sid != rootSlabID { + return NewFatalError( + fmt.Errorf( + "expect array slab ID same as root slab's slab ID %s, got %s", + rootSlabID, + sid)) } return nil diff --git a/map_test.go b/map_test.go index e4d7af3..e783811 100644 --- a/map_test.go +++ b/map_test.go @@ -24,6 +24,7 @@ import ( "math" "math/rand" "reflect" + "runtime" "sort" "strings" "testing" @@ -89,19 +90,27 @@ func (h *errorDigesterBuilder) Digest(_ HashInputProvider, _ Value) (Digester, e return nil, h.err } -func verifyEmptyMap( +func testEmptyMapV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, address Address, m *OrderedMap, ) { - verifyMap(t, storage, typeInfo, address, m, nil, nil, false) + testMapV0(t, storage, typeInfo, address, m, nil, nil, false) } -// verifyMap verifies map elements and validates serialization and in-memory slab tree. 
-// It also verifies elements ordering if sortedKeys is not nil. -func verifyMap( +func testEmptyMap( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, +) { + testMap(t, storage, typeInfo, address, m, nil, nil, false) +} + +func testMapV0( t *testing.T, storage *PersistentSlabStorage, typeInfo TypeInfo, @@ -110,49 +119,78 @@ func verifyMap( keyValues map[Value]Value, sortedKeys []Value, hasNestedArrayMapElement bool, +) { + _testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, false) +} + +func testMap( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, + keyValues mapValue, + sortedKeys []Value, + hasNestedArrayMapElement bool, +) { + _testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, hasNestedArrayMapElement, true) +} + +// _testMap verifies map elements and validates serialization and in-memory slab tree. +// It also verifies elements ordering if sortedKeys is not nil. 
+func _testMap( + t *testing.T, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, + m *OrderedMap, + expectedKeyValues map[Value]Value, + sortedKeys []Value, + hasNestedArrayMapElement bool, + inlineEnabled bool, ) { require.True(t, typeInfoComparator(typeInfo, m.Type())) require.Equal(t, address, m.Address()) - require.Equal(t, uint64(len(keyValues)), m.Count()) + require.Equal(t, uint64(len(expectedKeyValues)), m.Count()) var err error // Verify map elements - for k, v := range keyValues { - e, err := m.Get(compare, hashInputProvider, k) + for k, expected := range expectedKeyValues { + actual, err := m.Get(compare, hashInputProvider, k) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, e) + valueEqual(t, expected, actual) } // Verify map elements ordering if len(sortedKeys) > 0 { - require.Equal(t, len(keyValues), len(sortedKeys)) + require.Equal(t, len(expectedKeyValues), len(sortedKeys)) i := 0 - err = m.Iterate(func(k, v Value) (bool, error) { + err = m.IterateReadOnly(func(k, v Value) (bool, error) { expectedKey := sortedKeys[i] - expectedValue := keyValues[expectedKey] + expectedValue := expectedKeyValues[expectedKey] - valueEqual(t, typeInfoComparator, expectedKey, k) - valueEqual(t, typeInfoComparator, expectedValue, v) + valueEqual(t, expectedKey, k) + valueEqual(t, expectedValue, v) i++ return true, nil }) require.NoError(t, err) - require.Equal(t, len(keyValues), i) + require.Equal(t, len(expectedKeyValues), i) } // Verify in-memory slabs - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, inlineEnabled) if err != nil { PrintMap(m) } require.NoError(t, err) // Verify slab serializations - err = ValidMapSerialization( + err = VerifyMapSerialization( m, storage.cborDecMode, storage.cborEncMode, @@ -178,6 +216,31 @@ func verifyMap( require.Equal(t, 1, len(rootIDs)) require.Equal(t, m.SlabID(), rootIDs[0]) + // Encode all 
non-nil slab + encodedSlabs := make(map[SlabID][]byte) + for id, slab := range storage.deltas { + if slab != nil { + b, err := EncodeSlab(slab, storage.cborEncMode) + require.NoError(t, err) + encodedSlabs[id] = b + } + } + + // Test decoded map from new storage to force slab decoding + decodedMap, err := NewMapWithRootID( + newTestPersistentStorageWithBaseStorageAndDeltas(t, storage.baseStorage, encodedSlabs), + m.SlabID(), + m.digesterBuilder) + require.NoError(t, err) + + // Verify decoded map elements + for k, expected := range expectedKeyValues { + actual, err := decodedMap.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + valueEqual(t, expected, actual) + } + if !hasNestedArrayMapElement { // Need to call Commit before calling storage.Count() for PersistentSlabStorage. err = storage.Commit() @@ -187,7 +250,7 @@ func verifyMap( require.NoError(t, err) require.Equal(t, stats.SlabCount(), uint64(storage.Count())) - if len(keyValues) == 0 { + if len(expectedKeyValues) == 0 { // Verify slab count for empty map require.Equal(t, uint64(1), stats.DataSlabCount) require.Equal(t, uint64(0), stats.MetaDataSlabCount) @@ -274,7 +337,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("replicate keys", func(t *testing.T) { @@ -321,12 +384,12 @@ func TestMapSetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) keyValues[k] = newValue } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("random key and value", func(t *testing.T) { @@ -361,7 +424,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, 
typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("unique keys with hash collision", func(t *testing.T) { @@ -410,7 +473,7 @@ func TestMapSetAndGet(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("replicate keys with hash collision", func(t *testing.T) { @@ -469,12 +532,12 @@ func TestMapSetAndGet(t *testing.T) { existingValue, err := existingStorable.StoredValue(storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldValue, existingValue) + valueEqual(t, oldValue, existingValue) keyValues[k] = newValue } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -511,7 +574,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision", func(t *testing.T) { @@ -554,7 +617,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision group", func(t *testing.T) { @@ -597,7 +660,7 @@ func TestMapGetKeyNotFound(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } @@ -682,11 +745,11 @@ func testMapRemoveElement(t *testing.T, m *OrderedMap, k Value, expectedV Value) removedKey, err := 
removedKeyStorable.StoredValue(m.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + valueEqual(t, k, removedKey) removedValue, err := removedValueStorable.StoredValue(m.Storage) require.NoError(t, err) - valueEqual(t, typeInfoComparator, expectedV, removedValue) + valueEqual(t, expectedV, removedValue) if id, ok := removedKeyStorable.(SlabIDStorable); ok { err = m.Storage.Remove(SlabID(id)) @@ -763,7 +826,7 @@ func TestMapRemove(t *testing.T) { require.Nil(t, existingStorable) } - verifyMap(t, storage, typeInfo, address, m, tc.keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, tc.keyValues, nil, false) count := len(tc.keyValues) @@ -779,7 +842,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) } @@ -864,7 +927,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) + testMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) // Remove remaining elements for k, v := range nonCollisionKeyValues { @@ -878,7 +941,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, typeInfo, address, m) }) t.Run("collision with data root", func(t *testing.T) { @@ -947,7 +1010,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) + testMap(t, storage, typeInfo, address, m, nonCollisionKeyValues, nil, false) // Remove remaining elements for k, v := range nonCollisionKeyValues { @@ -961,7 +1024,7 @@ func TestMapRemove(t *testing.T) { require.Equal(t, uint64(count), m.Count()) } - verifyEmptyMap(t, storage, typeInfo, address, m) + testEmptyMap(t, storage, 
typeInfo, address, m) }) t.Run("no collision key not found", func(t *testing.T) { @@ -997,7 +1060,7 @@ func TestMapRemove(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) t.Run("collision key not found", func(t *testing.T) { @@ -1041,11 +1104,53 @@ func TestMapRemove(t *testing.T) { require.ErrorAs(t, err, &keyNotFoundError) require.ErrorAs(t, userError, &keyNotFoundError) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } -func TestMapIterate(t *testing.T) { +func TestReadOnlyMapIterate(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // Iterate key value pairs + i := 0 + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate keys + i = 0 + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + // Iterate values + i = 0 + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) t.Run("no collision", func(t *testing.T) { const ( @@ -1086,9 +1191,9 @@ func TestMapIterate(t *testing.T) { // Iterate key value pairs i = uint64(0) - err = m.Iterate(func(k Value, v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) - valueEqual(t, 
typeInfoComparator, keyValues[k], v) + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) i++ return true, nil }) @@ -1098,8 +1203,8 @@ func TestMapIterate(t *testing.T) { // Iterate keys i = uint64(0) - err = m.IterateKeys(func(k Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) i++ return true, nil }) @@ -1109,9 +1214,9 @@ func TestMapIterate(t *testing.T) { // Iterate values i = uint64(0) - err = m.IterateValues(func(v Value) (resume bool, err error) { + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { k := sortedKeys[i] - valueEqual(t, typeInfoComparator, keyValues[k], v) + valueEqual(t, keyValues[k], v) i++ return true, nil }) @@ -1119,7 +1224,7 @@ func TestMapIterate(t *testing.T) { require.NoError(t, err) require.Equal(t, uint64(mapSize), i) - verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) }) t.Run("collision", func(t *testing.T) { @@ -1163,5711 +1268,18325 @@ func TestMapIterate(t *testing.T) { } } - t.Log("created map of unique key value pairs") - // Sort keys by digest sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - t.Log("sorted keys by digests") - // Iterate key value pairs i := uint64(0) - err = m.Iterate(func(k Value, v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) - valueEqual(t, typeInfoComparator, keyValues[k], v) + err = m.IterateReadOnly(func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) i++ return true, nil }) require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated key value pairs") - // Iterate keys i = uint64(0) - err = m.IterateKeys(func(k Value) (resume 
bool, err error) { - valueEqual(t, typeInfoComparator, sortedKeys[i], k) + err = m.IterateReadOnlyKeys(func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) i++ return true, nil }) require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated keys") - // Iterate values i = uint64(0) - err = m.IterateValues(func(v Value) (resume bool, err error) { - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], v) + err = m.IterateReadOnlyValues(func(v Value) (resume bool, err error) { + valueEqual(t, keyValues[sortedKeys[i]], v) i++ return true, nil }) require.NoError(t, err) require.Equal(t, i, uint64(mapSize)) - t.Log("iterated values") - - verifyMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) }) } -func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { +func TestMutateElementFromReadOnlyMapIterator(t *testing.T) { - const ( - mapSize = 1024 - keyStringMaxSize = 1024 - valueStringMaxSize = 1024 + SetThreshold(256) + defer SetThreshold(1024) - // mockDigestCount is the number of unique set of digests. - // Each set has maxDigestLevel of digest. 
- mockDigestCount = 8 - ) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() - uniqueFirstLevelDigests := make(map[Digest]bool, mockDigestCount) - firstLevelDigests := make([]Digest, 0, mockDigestCount) - for len(firstLevelDigests) < mockDigestCount { - d := Digest(uint64(r.Intn(256))) - if !uniqueFirstLevelDigests[d] { - uniqueFirstLevelDigests[d] = true - firstLevelDigests = append(firstLevelDigests, d) - } - } + var mutationError *ReadOnlyIteratorElementMutationError - digestsGroup := make([][]Digest, mockDigestCount) - for i := 0; i < mockDigestCount; i++ { - digests := make([]Digest, maxDigestLevel) - digests[0] = firstLevelDigests[i] - for j := 1; j < maxDigestLevel; j++ { - digests[j] = Digest(uint64(r.Intn(256))) - } - digestsGroup[i] = digests - } + t.Run("mutate inlined map key from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - digesterBuilder := &mockDigesterBuilder{} + // child map key {} + childMapKey, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMapKey.Inlined()) - keyValues := make(map[Value]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, r.Intn(keyStringMaxSize))) - if _, found := keyValues[k]; !found { - keyValues[k] = NewStringValue(randStr(r, r.Intn(valueStringMaxSize))) + // parent map {{}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMapKey.Inlined()) - index := i % len(digestsGroup) - digests := digestsGroup[index] - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // Iterate elements and modify key + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + 
err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - i++ - } - } + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), k.(mutableValueNotifier).ValueID()) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + t.Run("mutate inlined map value from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMap.Inlined()) + + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) require.NoError(t, err) require.Nil(t, existingStorable) - } + require.True(t, childMap.Inlined()) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + // Iterate elements and modify value + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - stats, err := GetMapStats(m) - require.NoError(t, 
err) - require.Equal(t, uint64(mockDigestCount), stats.CollisionDataSlabCount) + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) - // Remove all elements - for k, v := range keyValues { - removedKeyStorable, removedValueStorable, err := m.Remove(compare, hashInputProvider, k) + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) + + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined map key from IterateReadOnlyKeys", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - removedKey, err := removedKeyStorable.StoredValue(storage) + // child map key {} + childMapKey, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + require.False(t, childMapKey.Inlined()) - removedValue, err := removedValueStorable.StoredValue(storage) + // parent map {{}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, removedValue) + require.Nil(t, existingStorable) + require.True(t, childMapKey.Inlined()) - if id, ok := removedKeyStorable.(SlabIDStorable); ok { - err = storage.Remove(SlabID(id)) - require.NoError(t, err) - } + // Iterate and modify key + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - if id, ok := removedValueStorable.(SlabIDStorable); ok { - err = 
storage.Remove(SlabID(id)) - require.NoError(t, err) - } - } + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) - verifyEmptyMap(t, storage, typeInfo, address, m) -} + return true, err + }, + func(v Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), v.(mutableValueNotifier).ValueID()) + }) -func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + }) - const ( - mapSize = 1024 - keyStringMaxSize = 1024 - valueStringMaxSize = 1024 - ) + t.Run("mutate inlined map value from IterateReadOnlyValues", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - digesterBuilder := &mockDigesterBuilder{} + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMap.Inlined()) - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, r.Intn(keyStringMaxSize))) + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) - if _, found := keyValues[k]; !found { - keyValues[k] = NewStringValue(randStr(r, valueStringMaxSize)) + // Iterate and modify value + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - var digests []Digest - for i := 0; i < maxDigestLevel; i++ { - digests = append(digests, Digest(r.Intn(256))) - } + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(1), 
Uint64Value(1)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - } - } + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + t.Run("mutate not inlined map key from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // child map key {} + childMapKey, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMapKey.Inlined()) + + // Inserting elements into childMapKey so it can't be inlined + const size = 20 + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMapKey.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // parent map {{...}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) require.NoError(t, err) require.Nil(t, existingStorable) - } + require.False(t, childMapKey.Inlined()) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + // Iterate elements and modify key + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - // Remove all elements - 
for k, v := range keyValues { - removedKeyStorable, removedValueStorable, err := m.Remove(compare, hashInputProvider, k) + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), k.(mutableValueNotifier).ValueID()) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) + + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) + + t.Run("mutate not inlined map value from IterateReadOnly", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - removedKey, err := removedKeyStorable.StoredValue(storage) + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + require.False(t, childMap.Inlined()) - removedValue, err := removedValueStorable.StoredValue(storage) + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, removedValue) + require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) - if id, ok := removedKeyStorable.(SlabIDStorable); ok { - err = storage.Remove(SlabID(id)) + // Inserting elements into childMap until it is no longer inlined + for i := 0; childMap.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) } - if id, ok := removedValueStorable.(SlabIDStorable); ok { - err = storage.Remove(SlabID(id)) - require.NoError(t, err) - } - } + 
// Iterate elements and modify value + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - verifyEmptyMap(t, storage, typeInfo, address, m) -} + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) -func TestMapHashCollision(t *testing.T) { + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) - SetThreshold(512) - defer SetThreshold(1024) + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) - const maxDigestLevel = 4 + t.Run("mutate not inlined map key from IterateReadOnlyKeys", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - r := newRand(t) + // child map key {} + childMapKey, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMapKey.Inlined()) - for hashLevel := 1; hashLevel <= maxDigestLevel; hashLevel++ { - name := fmt.Sprintf("deterministic max hash level %d", hashLevel) - t.Run(name, func(t *testing.T) { - testMapDeterministicHashCollision(t, r, hashLevel) - }) - } + // Inserting elements into childMap so it can't be inlined. 
+ const size = 20 + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMapKey.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - for hashLevel := 1; hashLevel <= maxDigestLevel; hashLevel++ { - name := fmt.Sprintf("random max hash level %d", hashLevel) - t.Run(name, func(t *testing.T) { - testMapRandomHashCollision(t, r, hashLevel) - }) - } -} + // parent map {{...}: 0} + existingStorable, err := parentMap.Set(compare, hashInputProvider, NewHashableMap(childMapKey), Uint64Value(0)) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.False(t, childMapKey.Inlined()) -func testMapSetRemoveRandomValues( - t *testing.T, - r *rand.Rand, - storage *PersistentSlabStorage, - typeInfo TypeInfo, - address Address, -) (*OrderedMap, map[Value]Value) { + // Iterate and modify key + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - const ( - MapSetOp = iota - MapRemoveOp - MapMaxOp - ) + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) - const ( - opCount = 4096 - digestMaxValue = 256 - digestMaxLevels = 4 - ) + return true, err + }, + func(v Value) { + keyMutationCallbackCalled = true + require.Equal(t, childMapKey.ValueID(), v.(mutableValueNotifier).ValueID()) + }) - digesterBuilder := &mockDigesterBuilder{} + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + }) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + t.Run("mutate not inlined map value from IterateReadOnlyValues", func(t *testing.T) { + parentMap, err := NewMap(storage, 
address, digesterBuilder, typeInfo) + require.NoError(t, err) - keyValues := make(map[Value]Value) - var keys []Value - for i := uint64(0); i < opCount; i++ { + // child map {} + childMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + require.False(t, childMap.Inlined()) - nextOp := r.Intn(MapMaxOp) + // parent map {0: {}} + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.True(t, childMap.Inlined()) - if m.Count() == 0 { - nextOp = MapSetOp + // Inserting elements into childMap until it is no longer inlined + for i := 0; childMap.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) } - switch nextOp { + // Iterate and modify value + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - case MapSetOp: + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) - k := randomValue(r, int(maxInlineMapElementSize)) - v := randomValue(r, int(maxInlineMapElementSize)) + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + require.Equal(t, childMap.ValueID(), v.(mutableValueNotifier).ValueID()) + }) - var digests []Digest - for i := 0; i < digestMaxLevels; i++ { - digests = append(digests, Digest(r.Intn(digestMaxValue))) - } + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + + t.Run("mutate inlined map key in collision from IterateReadOnly", func(t *testing.T) { + digesterBuilder 
:= &mockDigesterBuilder{} + + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {{}:0, {}:1} with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) + digests := []Digest{Digest(0)} digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. + digesterBuilder.On("Digest", m).Return(mockDigester{digests}) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) + } - if oldv, ok := keyValues[k]; ok { - require.NotNil(t, existingStorable) - - existingValue, err := existingStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, oldv, existingValue) + // Iterate element and modify key + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - if id, ok := existingStorable.(SlabIDStorable); ok { - err = storage.Remove(SlabID(id)) - require.NoError(t, err) - } - } else { + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) require.Nil(t, existingStorable) - keys = append(keys, k) - } + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + vid := 
k.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) - keyValues[k] = v + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) - case MapRemoveOp: - index := r.Intn(len(keys)) - k := keys[index] + t.Run("mutate inlined map value in collision from IterateReadOnly", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} - removedKeyStorable, removedValueStorable, err := m.Remove(compare, hashInputProvider, k) - require.NoError(t, err) + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - removedKey, err := removedKeyStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, k, removedKey) + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - removedValue, err := removedValueStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[k], removedValue) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - if id, ok := removedKeyStorable.(SlabIDStorable); ok { - err := storage.Remove(SlabID(id)) - require.NoError(t, err) - } + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) - if id, ok := removedValueStorable.(SlabIDStorable); ok { - err := storage.Remove(SlabID(id)) - require.NoError(t, err) - } + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - delete(keyValues, k) - copy(keys[index:], keys[index+1:]) - keys = keys[:len(keys)-1] + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + 
require.Nil(t, existingStorable) } - require.True(t, typeInfoComparator(typeInfo, m.Type())) - require.Equal(t, address, m.Address()) - require.Equal(t, uint64(len(keys)), m.Count()) - } + // Iterate elements and modify values + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - return m, keyValues -} + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(1), Uint64Value(1)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) -func TestMapSetRemoveRandomValues(t *testing.T) { + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) - SetThreshold(256) - defer SetThreshold(1024) + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) - r := newRand(t) + t.Run("mutate inlined map key in collision from IterateReadOnlyKeys", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} - storage := newTestPersistentStorage(t) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) -} + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) -func TestMapDecodeV0(t 
*testing.T) { + // parentMap {{}: 0, {}: 1} with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. + digesterBuilder.On("Digest", m).Return(mockDigester{digests}) - t.Run("empty", func(t *testing.T) { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // Iterate and modify keys + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - slabData := map[SlabID][]byte{ - mapSlabID: { - // extra data - // version - 0x00, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info - 0x18, 0x2a, - // count: 0 - 0x00, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) - // version - 0x00, - // flag: root + map data - 0x88, + return true, err + }, + func(v Value) { + keyMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }) - // the following encoded data is valid CBOR + require.ErrorAs(t, err, &mutationError) + require.True(t, keyMutationCallbackCalled) + }) - // elements (array of 3 elements) - 0x83, + t.Run("mutate inlined map value in 
collision from IterateReadOnlyValues", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} - // level: 0 - 0x00, + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - // hkeys (byte string of length 8 * 1) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - // elements (array of 0 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }, + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) + + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + require.Nil(t, existingStorable) } - // Decode data to new storage - storage := newTestPersistentStorageWithData(t, slabData) + // Iterate and modify values + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.True(t, c.Inlined()) - // Test new map from storage - decodedMap, err := NewMapWithRootID(storage, mapSlabID, NewDefaultDigesterBuilder()) - require.NoError(t, err) + existingStorable, err := c.Set(compare, hashInputProvider, Uint64Value(0), Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingStorable) - verifyEmptyMap(t, storage, typeInfo, address, decodedMap) - }) + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + 
require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) - t.Run("dataslab as root", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) + t.Run("mutate not inlined map key in collision from IterateReadOnly", func(t *testing.T) { digesterBuilder := &mockDigesterBuilder{} - const mapSize = 1 - keyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + const size = 20 + for i := 0; i < size; i++ { k := Uint64Value(i) - v := Uint64Value(i * 2) - keyValues[k] = v + v := Uint64Value(i) - digests := []Digest{Digest(i), Digest(i * 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + existingStorable, err := childMapKey1.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) } - mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Expected serialized slab data with slab id - slabData := map[SlabID][]byte{ + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) - mapSlabID: { - // extra data - // version - 0x00, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info - 0x18, 0x2a, - // count: 1 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + existingStorable, err := childMapKey2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - // version - 0x00, - // flag: root + map data - 0x88, + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // the following encoded data 
is valid CBOR + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) - // elements (array of 3 elements) - 0x83, + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. + digesterBuilder.On("Digest", m).Return(mockDigester{digests}) - // level: 0 - 0x00, + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - // hkeys (byte string of length 8 * 1) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Iterate elements and modify keys + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := k.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // element: [uint64(0):uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err }, - } + func(k Value) { + keyMutationCallbackCalled = true + vid := k.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }, + func(v Value) { + valueMutationCallbackCalled = true + }) - // Decode data to new storage - storage := newTestPersistentStorageWithData(t, slabData) + require.ErrorAs(t, err, &mutationError) + 
require.True(t, keyMutationCallbackCalled) + require.False(t, valueMutationCallbackCalled) + }) - // Test new map from storage - decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + t.Run("mutate not inlined map value in collision from IterateReadOnly", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) - }) + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - t.Run("has pointer no collision", func(t *testing.T) { + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - SetThreshold(256) - defer SetThreshold(1024) + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) - storage := newTestBasicStorage(t) + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - digesterBuilder := &mockDigesterBuilder{} + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - const mapSize = 8 - keyValues := make(map[Value]Value, mapSize) - r := 'a' - for i := uint64(0); i < mapSize-1; i++ { - k := NewStringValue(strings.Repeat(string(r), 22)) - v := NewStringValue(strings.Repeat(string(r), 22)) - keyValues[k] = v + for i := 0; childMap1.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) - digests := []Digest{Digest(i), Digest(i * 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + existingStorable, err := childMap1.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - r++ + for i := 0; childMap2.Inlined(); i++ { + k := 
Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := childMap2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) } - // Create nested array - typeInfo2 := testTypeInfo{43} + // Iterate elements and modify values + var keyMutationCallbackCalled, valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyWithMutationCallback( + func(k Value, v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - nestedSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) - nested, err := NewArray(storage, address, typeInfo2) - nested.root.SetSlabID(nestedSlabID) - require.NoError(t, err) + return true, err + }, + func(k Value) { + keyMutationCallbackCalled = true + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) + + require.ErrorAs(t, err, &mutationError) + require.False(t, keyMutationCallbackCalled) + require.True(t, valueMutationCallbackCalled) + }) - err = nested.Append(Uint64Value(0)) + t.Run("mutate not inlined map key in collision from IterateReadOnlyKeys", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} + + // childMapKey1 {} + childMapKey1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) require.NoError(t, err) - k := NewStringValue(strings.Repeat(string(r), 22)) - v := nested - keyValues[k] = v + 
size := 20 + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) - digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + existingStorable, err := childMapKey1.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - slabData := map[SlabID][]byte{ + // childMapKey2 {} + childMapKey2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - // metadata slab - mapSlabID: { - // extra data - // version - 0x00, - // flag: root + map meta - 0x89, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := Uint64Value(i) - // version - 0x00, - // flag: root + meta - 0x89, - // child header count - 0x00, 0x02, - // child header 1 (slab id, first key, size) - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // NOTE: size is modified to pass validation test. - // This shouldn't affect migration because metadata slab is recreated when elements are migrated. - 0x00, 0x00, 0x00, 0xf6, - // child header 2 - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - // NOTE: size is modified to pass validation test. - // This shouldn't affect migration because metadata slab is recreated when elements are migrated. 
- 0x00, 0x00, 0x00, 0xf2, - }, + existingStorable, err := childMapKey2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - // data slab - id2: { - // version - 0x00, - // flag: map data - 0x08, - // next slab id - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // the following encoded data is valid CBOR + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMapKey1, childMapKey2} { + k := NewHashableMap(m) + v := Uint64Value(i) - // elements (array of 3 elements) - 0x83, + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // This is needed because Digest is called again on OrderedMap when inserting collision element. + digesterBuilder.On("Digest", m).Return(mockDigester{digests}) - // level: 0 - 0x00, + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - // hkeys (byte string of length 8 * 4) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // Iterate and modify keys + var keyMutationCallbackCalled bool + err = parentMap.IterateReadOnlyKeysWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - // elements (array of 4 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] - 0x82, - 0x76, 
0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] - 0x82, - 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, - 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, - // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] - 0x82, - 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] - 0x82, - 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, - 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) + + return true, err }, + func(v Value) { + keyMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMapKey1.ValueID() == vid || childMapKey2.ValueID() == vid) + }) - // data slab - id3: { - // version - 0x00, - // flag: has pointer + map data - 0x48, - // next slab id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + require.ErrorAs(t, err, 
&mutationError) + require.True(t, keyMutationCallbackCalled) + }) - // the following encoded data is valid CBOR + t.Run("mutate not inlined map value in collision from IterateReadOnlyValues", func(t *testing.T) { + digesterBuilder := &mockDigesterBuilder{} - // elements (array of 3 elements) - 0x83, + // childMap1 {} + childMap1, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - // level: 0 - 0x00, + // childMap2 {} + childMap2, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) - // hkeys (byte string of length 8 * 4) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // elements (array of 4 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] - 0x82, - 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, - 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, - // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] - 0x82, - 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] - 0x82, - 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 
0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, - 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, - // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] - 0x82, - 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, + // parentMap {0: {}, 1:{}} with all elements in the same collision group + for i, m := range []*OrderedMap{childMap1, childMap2} { + k := Uint64Value(i) - // array data slab - nestedSlabID: { - // version - 0x00, - // flag: root + array data - 0x80, - // extra data (CBOR encoded array of 1 elements) - 0x81, - // type info - 0x18, 0x2b, + digests := []Digest{Digest(0)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - // version - 0x00, - // flag: root + array data - 0x80, - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, - }, + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, m) + require.NoError(t, err) + require.Nil(t, existingStorable) } - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, slabData) - - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage2, mapSlabID, digesterBuilder) - require.NoError(t, err) - - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) - }) - - t.Run("inline collision 1 level", func(t *testing.T) { - - SetThreshold(256) - defer SetThreshold(1024) + for i := 0; childMap1.Inlined(); i++ { + k := Uint64Value(i) + v := Uint64Value(i) - digesterBuilder := &mockDigesterBuilder{} + existingStorable, err := childMap1.Set(compare, hashInputProvider, k, v) + require.NoError(t, 
err) + require.Nil(t, existingStorable) + } - const mapSize = 8 - keyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { + for i := 0; childMap2.Inlined(); i++ { k := Uint64Value(i) - v := Uint64Value(i * 2) - - digests := []Digest{Digest(i % 4), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + v := Uint64Value(i) - keyValues[k] = v + existingStorable, err := childMap2.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) } - mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - - slabData := map[SlabID][]byte{ + // Iterate and modify values + var valueMutationCallbackCalled bool + err = parentMap.IterateReadOnlyValuesWithMutationCallback( + func(v Value) (resume bool, err error) { + c, ok := v.(*OrderedMap) + require.True(t, ok) + require.False(t, c.Inlined()) - // map metadata slab - mapSlabID: { - // extra data - // version - 0x00, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + existingKeyStorable, existingValueStorable, err := c.Remove(compare, hashInputProvider, Uint64Value(0)) + require.ErrorAs(t, err, &mutationError) + require.Nil(t, existingKeyStorable) + require.Nil(t, existingValueStorable) - // version - 0x00, - // flag: root + map data - 0x88, + return true, err + }, + func(v Value) { + valueMutationCallbackCalled = true + vid := v.(mutableValueNotifier).ValueID() + require.True(t, childMap1.ValueID() == vid || childMap2.ValueID() == vid) + }) - // the following encoded data is valid CBOR + require.ErrorAs(t, err, &mutationError) + require.True(t, valueMutationCallbackCalled) + }) +} - // elements (array of 3 elements) - 0x83, +func TestMutableMapIterate(t *testing.T) { - // level: 0 - 0x00, + t.Run("empty", func(t *testing.T) { - // hkeys (byte string 
of length 8 * 4) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - // elements (array of 2 elements) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + // Iterate key value pairs + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + i++ + return true, nil + }) - // level: 1 - 0x01, + require.NoError(t, err) + require.Equal(t, 0, i) - // hkeys (byte string of length 8 * 2) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + const mapSize = 15 - // level: 1 - 0x01, + 
typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() - // hkeys (byte string of length 8 * 2) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) - // inline collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // level: 1 - 0x01, + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) - // hkeys (byte string of length 8 * 2) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 
0xa4, 0x06, 0xd8, 0xa4, 0x0c, + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + newValue := v.(Uint64Value) * 2 - // level: 1 - 0x01, + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) - // hkeys (byte string of length 8 * 2) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - }, - } + keyValues[k] = newValue - // Decode data to new storage - storage := newTestPersistentStorageWithData(t, slabData) + i++ - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + return true, nil + }) require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) - t.Run("inline collision 2 levels", func(t *testing.T) { - + t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - digesterBuilder := &mockDigesterBuilder{} + const mapSize = 25 - const mapSize = 8 - keyValues := 
make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { k := Uint64Value(i) - v := Uint64Value(i * 2) + v := Uint64Value(i) - digests := []Digest{Digest(i % 4), Digest(i % 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + sortedKeys[i] = k keyValues[k] = v } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) - mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - slabData := map[SlabID][]byte{ + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) - // map metadata slab - mapSlabID: { - // extra data - // version - 0x00, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - - // version - 0x00, - // flag: root + map data - 0x88, + newValue := v.(Uint64Value) * 2 - // the following encoded data is valid CBOR - - // elements (array of 3 elements) - 0x83, - - // level: 0 - 0x00, + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) - // hkeys (byte string of length 8 * 4) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // 
hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) - // elements (array of 4 elements) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + keyValues[k] = newValue - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + i++ - // level 1 - 0x01, + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) - // hkeys (byte string of length 8 * 1) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) - // elements (array of 1 elements) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - // inline collision group corresponding to hkey [0, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + const mapSize = 15 - // level: 2 - 0x02, + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() - // hkeys (empty byte string) - 0x40, + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 
0xd8, 0xa4, 0x08, + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // level: 1 - 0x01, + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) - // hkeys (byte string of length 8 * 1) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - // elements (array of 1 elements) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + i := 0 + r := 'a' + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) - // inline collision group corresponding to hkey [1, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + newValue := NewStringValue(strings.Repeat(string(r), 25)) - // level: 2 - 0x02, + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) - // hkeys (empty byte string) - 0x40, + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + keyValues[k] = newValue - // inline 
collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + i++ + r++ - // level: 1 - 0x01, + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) - // hkeys (byte string of length 8 * 1) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) - // elements (array of 1 element) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) - // inline collision group corresponding to hkey [2, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + const mapSize = 25 - // level: 2 - 0x02, + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() - // hkeys (empty byte string) - 0x40, + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // elements (array of 2 element) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // level: 1 - 0x01, + 
keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) - // hkeys (byte string of length 8 * 1) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - // elements (array of 1 element) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + i := 0 + r := 'a' + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) - // inline collision group corresponding to hkey [3, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + newValue := NewStringValue(strings.Repeat(string(r), 25)) - // level: 2 - 0x02, + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) - // hkeys (empty byte string) - 0x40, + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) - // elements (array of 2 element) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - }, - } + keyValues[k] = newValue - // Decode data to new storage - storage := newTestPersistentStorageWithData(t, slabData) + i++ + r++ - // Test new map from storage - decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + return true, nil + }) require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) - t.Run("external collision", func(t *testing.T) { - + t.Run("mutate primitive values, root is 
metadata slab, merge slabs", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - digesterBuilder := &mockDigesterBuilder{} + const mapSize = 10 - const mapSize = 20 - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + r := 'a' + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { k := Uint64Value(i) - v := Uint64Value(i * 2) + v := NewStringValue(strings.Repeat(string(r), 25)) - digests := []Digest{Digest(i % 2), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + r++ keyValues[k] = v + sortedKeys[i] = k } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) - mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) - slabData := map[SlabID][]byte{ + i := 0 + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 
mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision primitive values, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision primitive values, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage 
:= newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := 
NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 
4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return 
true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + 
require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + const ( + mapSize = 35 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + 
require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 10 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + 
childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j >= mutatedChildMapSize; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + 
require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision inlined container, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, 
childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision inlined container, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok 
:= v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container", func(t *testing.T) { + const ( + mapSize = 15 + valueStringSize = 16 + ) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys[i] = k + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + 
testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, 
childMap.Inlined()) + + i++ + + delete(expectedChildMapValues, ck) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := 
v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 5 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, 
uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, 
mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) 
+ require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 10 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.Iterate(compare, hashInputProvider, func(k Value, v Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), 
childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} + +func TestMutableMapIterateKeys(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, 
err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + sortedKeys[i] = k + keyValues[k] = v + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, 
sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + 
require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, merge slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := 
Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + r := 'a' + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (bool, error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision primitive values, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", 
k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision primitive values, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := uint64(0) + err = m.IterateKeys(compare, 
hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v := keyValues[k] + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := 
uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, 
uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err 
:= NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, 
m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + const ( + mapSize = 35 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + 
require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 10 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues 
+ sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j >= mutatedChildMapSize; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision inlined container, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, 
newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision inlined container, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := 
Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + 
require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container", func(t *testing.T) { + const ( + mapSize = 15 + valueStringSize = 16 + ) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys[i] = k + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // 
Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + delete(expectedChildMapValues, ck) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + 
digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + 
expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 5 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, 
func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := 
Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 10 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := 
newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateKeys(compare, hashInputProvider, func(k Value) (resume bool, err error) { + valueEqual(t, sortedKeys[i], k) + + v, err := m.Get(compare, hashInputProvider, k) + require.NoError(t, err) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + 
require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} + +func TestMutableMapIterateValues(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + i := 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testMap(t, storage, typeInfo, address, m, mapValue{}, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateValues(compare, 
hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, no slab operation", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + sortedKeys[i] = k + keyValues[k] = v + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) * 2 + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + + return true, 
nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is data slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 15 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, sortedKeys[i], k) + valueEqual(t, keyValues[k], v) + + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, split slab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 25 + + typeInfo := 
testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + r := 'a' + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate primitive values, root is metadata slab, merge slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + r := 'a' + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := 
Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 25)) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + keyValues[k] = v + sortedKeys[i] = k + } + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := 0 + err = m.IterateValues(compare, hashInputProvider, func(v Value) (bool, error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, v, existingValue) + + keyValues[k] = newValue + + i++ + r++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, mapSize, i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision primitive values, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate 
key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision primitive values, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + newValue := v.(Uint64Value) / 2 + existingStorable, err := m.Set(compare, hashInputProvider, k, newValue) + require.NoError(t, err) + + 
existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + require.Equal(t, keyValues[k], existingValue) + + i++ + keyValues[k] = newValue + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), 
childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := Uint64Value(i) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest 
+ sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (updating elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[childKey] = childNewValue + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + 
childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, split slab", func(t *testing.T) { + const ( + mapSize = 35 + childMapSize = 1 + mutatedChildMapSize = 5 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := 
newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, 
uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 10 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + 
valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j >= mutatedChildMapSize; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("mutate collision inlined container, 1 level", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, 
existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate collision inlined container, 4 levels", func(t *testing.T) { + const ( + mapSize = 1024 + ) + + r := newRand(t) + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + ck := Uint64Value(0) + cv := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, 
existingStorable) + + k := Uint64Value(i) + + digests := []Digest{ + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + Digest(r.Intn(256)), + } + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + sortedKeys[i] = k + } + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + childKey := Uint64Value(0) + childNewValue := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childNewValue) + require.NoError(t, err) + require.NotNil(t, existingStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues[childKey] = childNewValue + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, i, uint64(mapSize)) + + testMap(t, storage, typeInfo, address, m, keyValues, sortedKeys, false) + }) + + t.Run("mutate inlined container", func(t *testing.T) { + const ( + mapSize = 15 + valueStringSize = 16 + ) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + 
sortedKeys := make([]Value, mapSize) + i := uint64(0) + for i := 0; i < mapSize; i++ { + ck := Uint64Value(0) + cv := NewStringValue(randStr(r, valueStringSize)) + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + sortedKeys[i] = k + + existingStorable, err = m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = mapValue{ck: cv} + } + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i = uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + newChildMapKey := Uint64Value(1) // Previous key is 0 + newChildMapValue := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, newChildMapKey, newChildMapValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + expectedChildMapValues[newChildMapKey] = newChildMapValue + + i++ + + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, 
false) + + // Iterate and mutate child map (removing elements) + i = uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(2), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + // Remove key 0 + ck := Uint64Value(0) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, ck) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + require.Equal(t, uint64(1), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + delete(expectedChildMapValues, ck) + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 1 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + 
existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("uninline inlined container, root is metadata slab, merge slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 5 + mutatedChildMapSize = 35 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, 
address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize; j < mutatedChildMapSize; j++ { + childKey := Uint64Value(j) + childValue := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, childKey, childValue) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[childKey] = childValue + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + 
require.False(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, no slab operation", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 1 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := 
v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("inline uninlined container, root is data slab, split slab", func(t *testing.T) { + const ( + mapSize = 15 + childMapSize = 35 + mutatedChildMapSize = 10 + ) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := 0; i < mapSize; i++ { + + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapValues := make(mapValue) + for j := 0; j < childMapSize; j++ { + ck := Uint64Value(j) + cv := Uint64Value(j) + + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + childMapValues[ck] = cv + } + + k := Uint64Value(i) + + existingStorable, err := m.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, 
existingStorable) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + keyValues[k] = childMapValues + sortedKeys[i] = k + } + + require.Equal(t, uint64(mapSize), m.Count()) + require.True(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Sort keys by digest + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate and mutate child map (inserting elements) + i := uint64(0) + err = m.IterateValues(compare, hashInputProvider, func(v Value) (resume bool, err error) { + k := sortedKeys[i] + + valueEqual(t, keyValues[k], v) + + childMap, ok := v.(*OrderedMap) + require.True(t, ok) + require.Equal(t, uint64(childMapSize), childMap.Count()) + require.False(t, childMap.Inlined()) + + expectedChildMapValues, ok := keyValues[k].(mapValue) + require.True(t, ok) + + for j := childMapSize - 1; j > mutatedChildMapSize-1; j-- { + childKey := Uint64Value(j) + + existingKeyStorable, existingValueStorable, err := childMap.Remove(compare, hashInputProvider, childKey) + require.NoError(t, err) + require.NotNil(t, existingKeyStorable) + require.NotNil(t, existingValueStorable) + + delete(expectedChildMapValues, childKey) + } + + require.Equal(t, uint64(mutatedChildMapSize), childMap.Count()) + require.True(t, childMap.Inlined()) + + i++ + + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, uint64(mapSize), i) + require.False(t, m.root.IsData()) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} + +func testMapDeterministicHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { + + const ( + mapSize = 1024 + keyStringMaxSize = 1024 + valueStringMaxSize = 1024 + + // mockDigestCount is the number of unique set of digests. + // Each set has maxDigestLevel of digest. 
+ mockDigestCount = 8 + ) + + uniqueFirstLevelDigests := make(map[Digest]bool, mockDigestCount) + firstLevelDigests := make([]Digest, 0, mockDigestCount) + for len(firstLevelDigests) < mockDigestCount { + d := Digest(uint64(r.Intn(256))) + if !uniqueFirstLevelDigests[d] { + uniqueFirstLevelDigests[d] = true + firstLevelDigests = append(firstLevelDigests, d) + } + } + + digestsGroup := make([][]Digest, mockDigestCount) + for i := 0; i < mockDigestCount; i++ { + digests := make([]Digest, maxDigestLevel) + digests[0] = firstLevelDigests[i] + for j := 1; j < maxDigestLevel; j++ { + digests[j] = Digest(uint64(r.Intn(256))) + } + digestsGroup[i] = digests + } + + digesterBuilder := &mockDigesterBuilder{} + + keyValues := make(map[Value]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, r.Intn(keyStringMaxSize))) + if _, found := keyValues[k]; !found { + keyValues[k] = NewStringValue(randStr(r, r.Intn(valueStringMaxSize))) + + index := i % len(digestsGroup) + digests := digestsGroup[index] + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + i++ + } + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + stats, err := GetMapStats(m) + require.NoError(t, err) + require.Equal(t, uint64(mockDigestCount), stats.CollisionDataSlabCount) + + // Remove all elements + for k, v := range keyValues { + removedKeyStorable, removedValueStorable, err := m.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + + removedKey, err := removedKeyStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, k, removedKey) + + 
removedValue, err := removedValueStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, v, removedValue) + + if id, ok := removedKeyStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + if id, ok := removedValueStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + } + + testEmptyMap(t, storage, typeInfo, address, m) +} + +func testMapRandomHashCollision(t *testing.T, r *rand.Rand, maxDigestLevel int) { + + const ( + mapSize = 1024 + keyStringMaxSize = 1024 + valueStringMaxSize = 1024 + ) + + digesterBuilder := &mockDigesterBuilder{} + + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, r.Intn(keyStringMaxSize))) + + if _, found := keyValues[k]; !found { + keyValues[k] = NewStringValue(randStr(r, valueStringMaxSize)) + + var digests []Digest + for i := 0; i < maxDigestLevel; i++ { + digests = append(digests, Digest(r.Intn(256))) + } + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Remove all elements + for k, v := range keyValues { + removedKeyStorable, removedValueStorable, err := m.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + + removedKey, err := removedKeyStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, k, removedKey) + + removedValue, err := removedValueStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, v, removedValue) + + if id, ok := 
removedKeyStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + if id, ok := removedValueStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + } + + testEmptyMap(t, storage, typeInfo, address, m) +} + +func TestMapHashCollision(t *testing.T) { + + SetThreshold(512) + defer SetThreshold(1024) + + const maxDigestLevel = 4 + + r := newRand(t) + + for hashLevel := 1; hashLevel <= maxDigestLevel; hashLevel++ { + name := fmt.Sprintf("deterministic max hash level %d", hashLevel) + t.Run(name, func(t *testing.T) { + testMapDeterministicHashCollision(t, r, hashLevel) + }) + } + + for hashLevel := 1; hashLevel <= maxDigestLevel; hashLevel++ { + name := fmt.Sprintf("random max hash level %d", hashLevel) + t.Run(name, func(t *testing.T) { + testMapRandomHashCollision(t, r, hashLevel) + }) + } +} + +func testMapSetRemoveRandomValues( + t *testing.T, + r *rand.Rand, + storage *PersistentSlabStorage, + typeInfo TypeInfo, + address Address, +) (*OrderedMap, map[Value]Value) { + + const ( + MapSetOp = iota + MapRemoveOp + MapMaxOp + ) + + const ( + opCount = 4096 + digestMaxValue = 256 + digestMaxLevels = 4 + ) + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + var keys []Value + for i := uint64(0); i < opCount; i++ { + + nextOp := r.Intn(MapMaxOp) + + if m.Count() == 0 { + nextOp = MapSetOp + } + + switch nextOp { + + case MapSetOp: + + k := randomValue(r, int(maxInlineMapElementSize)) + v := randomValue(r, int(maxInlineMapElementSize)) + + var digests []Digest + for i := 0; i < digestMaxLevels; i++ { + digests = append(digests, Digest(r.Intn(digestMaxValue))) + } + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + + if oldv, ok := keyValues[k]; ok { + 
require.NotNil(t, existingStorable) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, oldv, existingValue) + + if id, ok := existingStorable.(SlabIDStorable); ok { + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } + } else { + require.Nil(t, existingStorable) + + keys = append(keys, k) + } + + keyValues[k] = v + + case MapRemoveOp: + index := r.Intn(len(keys)) + k := keys[index] + + removedKeyStorable, removedValueStorable, err := m.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + + removedKey, err := removedKeyStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, k, removedKey) + + removedValue, err := removedValueStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, keyValues[k], removedValue) + + if id, ok := removedKeyStorable.(SlabIDStorable); ok { + err := storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + if id, ok := removedValueStorable.(SlabIDStorable); ok { + err := storage.Remove(SlabID(id)) + require.NoError(t, err) + } + + delete(keyValues, k) + copy(keys[index:], keys[index+1:]) + keys = keys[:len(keys)-1] + } + + require.True(t, typeInfoComparator(typeInfo, m.Type())) + require.Equal(t, address, m.Address()) + require.Equal(t, uint64(len(keys)), m.Count()) + } + + return m, keyValues +} + +func TestMapSetRemoveRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + storage := newTestPersistentStorage(t) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) +} + +func TestMapDecodeV0(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + + mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + 
slabData := map[SlabID][]byte{ + mapSlabID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + } + + // Decode data to new storage + storage := newTestPersistentStorageWithData(t, slabData) + + // Test new map from storage + decodedMap, err := NewMapWithRootID(storage, mapSlabID, NewDefaultDigesterBuilder()) + require.NoError(t, err) + + testEmptyMapV0(t, storage, typeInfo, address, decodedMap) + }) + + t.Run("dataslab as root", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + digesterBuilder := &mockDigesterBuilder{} + + const mapSize = 1 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + } + + mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + slabData := map[SlabID][]byte{ + + mapSlabID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following 
encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + }, + } + + // Decode data to new storage + storage := newTestPersistentStorageWithData(t, slabData) + + // Test new map from storage + decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + require.NoError(t, err) + + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("has pointer no collision", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + r++ + } + + // Create nested array + typeInfo2 := testTypeInfo{43} + + mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + nestedSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + childArray, err := NewArray(storage, address, typeInfo2) + childArray.root.SetSlabID(nestedSlabID) + require.NoError(t, err) + + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) + + k := 
NewStringValue(strings.Repeat(string(r), 22)) + + keyValues[k] = arrayValue{Uint64Value(0)} + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + slabData := map[SlabID][]byte{ + + // metadata slab + mapSlabID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // NOTE: size is modified to pass validation test. + // This shouldn't affect migration because metadata slab is recreated when elements are migrated. + 0x00, 0x00, 0x00, 0xf6, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // NOTE: size is modified to pass validation test. + // This shouldn't affect migration because metadata slab is recreated when elements are migrated. 
+ 0x00, 0x00, 0x00, 0xf2, + }, + + // data slab + id2: { + // version + 0x00, + // flag: map data + 0x08, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 
0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + id3: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next slab id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 
0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // array data slab + nestedSlabID: { + // version + 0x00, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 elements) + 0x81, + // type info + 0x18, 0x2b, + + // version + 0x00, + // flag: root + array data + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, slabData) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, mapSlabID, digesterBuilder) + require.NoError(t, err) + + testMapV0(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("inline collision 1 level", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + digesterBuilder := &mockDigesterBuilder{} + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 4), Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + keyValues[k] = v + } + + mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + slabData := map[SlabID][]byte{ + + // map metadata slab + mapSlabID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 
0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 2 elements) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + + // inline collision group corresponding to hkey 0 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + + // inline collision group corresponding to hkey 1 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x02, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + + // inline collision group corresponding to hkey 2 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + + // inline collision group corresponding to hkey 3 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + }, + } + + // Decode data to new storage + storage := newTestPersistentStorageWithData(t, slabData) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + require.NoError(t, err) + + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("inline collision 2 
levels", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + digesterBuilder := &mockDigesterBuilder{} + + const mapSize = 8 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 4), Digest(i % 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + keyValues[k] = v + } + + mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + slabData := map[SlabID][]byte{ + + // map metadata slab + mapSlabID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + + // inline collision group corresponding to hkey 0 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // inline collision group corresponding to hkey [0, 0] + // (tag number CBORTagInlineCollisionGroup) 
+ 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + + // inline collision group corresponding to hkey 1 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 elements) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // inline collision group corresponding to hkey [1, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + + // inline collision group corresponding to hkey 2 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 element) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // inline collision group corresponding to hkey [2, 0] + // (tag number CBORTagInlineCollisionGroup) 
+ 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 element) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + + // inline collision group corresponding to hkey 3 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 element) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // inline collision group corresponding to hkey [3, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 element) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + }, + } + + // Decode data to new storage + storage := newTestPersistentStorageWithData(t, slabData) + + // Test new map from storage + decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + require.NoError(t, err) + + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("external collision", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + digesterBuilder := &mockDigesterBuilder{} + + const mapSize = 20 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 2), Digest(i)} + 
digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + keyValues[k] = v + } + + mapSlabID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + slabData := map[SlabID][]byte{ + + // map data slab + mapSlabID: { + // extra data + // version + 0x00, + // flag: root + has pointer + map data + 0xc8, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 20 + 0x14, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + has pointer + map data + 0xc8, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // external collision group corresponding to hkey 0 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // external collision group corresponding to hkey 1 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // external collision group + id2: { + // version + 0x00, + // flag: any size + collision group + 0x2b, + // next slab id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 
elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 8 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + // hkey: 12 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, + // hkey: 14 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + // hkey: 16 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 18 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + // element: [uint64(8), uint64(16)] + 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, + // element: [uint64(10), uint64(20)] + 0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, + // element: [uint64(12), uint64(24)] + 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, + // element: [uint64(14), uint64(28)] + 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, + // element: [uint64(16), uint64(32)] + 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, + // element: [uint64(18), uint64(36)] + 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, + }, + + // external collision group + id3: { + // version + 0x00, + // flag: any size + collision group + 0x2b, + // next slab id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // hkey: 9 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // hkey: 11 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + // hkey: 13 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + // hkey: 15 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, + // hkey: 17 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + // hkey: 19 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + // element: [uint64(9), uint64(18)] + 0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12, + // element: [uint64(11), uint64(22))] + 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, + // element: [uint64(13), uint64(26)] + 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, + // element: [uint64(15), uint64(30)] + 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, + // element: [uint64(17), uint64(34)] + 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, + // element: [uint64(19), uint64(38)] + 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, + }, + } + + // Decode data to new storage + storage := newTestPersistentStorageWithData(t, slabData) + + // Test 
new map from storage + decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + require.NoError(t, err) + + testMapV0(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + }) +} + +func TestMapEncodeDecode(t *testing.T) { + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + + storage := newTestBasicStorage(t) + + // Create map + m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + expected := map[SlabID][]byte{ + id1: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, 1, len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, NewDefaultDigesterBuilder()) + require.NoError(t, err) + + testEmptyMap(t, storage2, typeInfo, address, decodedMap) + }) + + t.Run("dataslab as root", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, 
digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 1 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + id1: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("has inlined array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // 
Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child array + typeInfo2 := testTypeInfo{43} + + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + err = childArray.Append(Uint64Value(0)) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childArray + + keyValues[k] = arrayValue{Uint64Value(0)} + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert nested array + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // metadata slab + id1: { + // version + 0x10, + // flag: root + map meta + 0x89, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 
child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf6, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xf3, + }, + + // data slab + id2: { + // version + 0x12, + // flag: map data + 0x08, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + id3: { + // version + 0x11, + // flag: has inlined slab + map data + 0x08, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Verify slab size in header is correct. 
+ meta, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) + require.Equal(t, 2, len(meta.childrenHeaders)) + require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) + + const inlinedExtraDataSize = 8 + require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+slabIDSize), meta.childrenHeaders[1].size) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root data slab, inlined child map of same type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := Uint64Value(i) + cv := Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: cv} + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 
1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data + 0x82, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + 
// extra data index 0 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root data slab, inlined child map of different type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + + var ti TypeInfo + if i%2 == 0 { + ti = childMapTypeInfo2 + } else { + ti = childMapTypeInfo1 + } + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) + require.NoError(t, err) + + ck := Uint64Value(i) + cv := Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", 
k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: cv} + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x82, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 
+ 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root data slab, multiple levels of inlined child map of same type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + // Create grand child map + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + gck := Uint64Value(i) + gcv := Uint64Value(i * 2) + + // 
Insert element to grand child map + existingStorable, err := gchildMap.Set(compare, hashInputProvider, gck, gcv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := Uint64Value(i) + + // Insert grand child map to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: mapValue{gck: gcv}} + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 
0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + // element 3 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x1, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x0, + // value: 0 + 0xd8, 0xa4, 0x0, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x3, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + // value: 2 + 0xd8, 0xa4, 0x2, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root data slab, multiple levels of inlined child map of different type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + gchildMapTypeInfo1 := testTypeInfo{45} + gchildMapTypeInfo2 := testTypeInfo{46} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + var gti TypeInfo + if i%2 == 0 { + gti = gchildMapTypeInfo2 + } else { + gti = 
gchildMapTypeInfo1 + } + + // Create grand child map + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gti) + require.NoError(t, err) + + gck := Uint64Value(i) + gcv := Uint64Value(i * 2) + + // Insert element to grand child map + existingStorable, err := gchildMap.Set(compare, hashInputProvider, gck, gcv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + var cti TypeInfo + if i%2 == 0 { + cti = childMapTypeInfo2 + } else { + cti = childMapTypeInfo1 + } + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), cti) + require.NoError(t, err) + + ck := Uint64Value(i) + + // Insert grand child map to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: mapValue{gck: gcv}} + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version 1, flag: has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of type info + 0x80, + // element 1: array of extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + + // element 1 + // 
inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // element 3 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2d, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined child map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x1, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, 
+ // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x3, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + // value: 2 + 0xd8, 0xa4, 0x2, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root metadata slab, inlined child map of same type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + + 
// Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := Uint64Value(i) + cv := Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: cv} + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9 + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version + 0x10, + // flag: root + map metadata + 0x89, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xda, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xda, + }, + id2: { + // version, flag: has inlined slab, has next slab ID + 0x13, + // flag: map data + 0x08, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of 
inlined extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // next slab ID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + 
// element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: "c" + 0x61, 0x63, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: 4 + 0xd8, 0xa4, 0x04, + + // element 4: + 0x82, + // key: "d" + 0x61, 0x64, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + + id3: { + // version, flag: has inlined slab + 0x11, + // flag: map data + 0x08, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0x18, 0x2b, + // element 1: array of inlined extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // 
seed + 0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "e" + 0x61, 0x65, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 4 + 0xd8, 0xa4, 0x04, + // value: 8 + 0xd8, 0xa4, 0x08, + + // element 1: + 0x82, + // key: "f" + 0x61, 0x66, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // 
inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 5 + 0xd8, 0xa4, 0x05, + // value: 10 + 0xd8, 0xa4, 0x0a, + + // element 3: + 0x82, + // key: "g" + 0x61, 0x67, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 6 + 0xd8, 0xa4, 0x06, + // value: 12 + 0xd8, 0xa4, 0x0c, + + // element 4: + 0x82, + // key: "h" + 0x61, 0x68, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 7 + 0xd8, 0xa4, 0x07, + // value: 14 + 0xd8, 0xa4, 0x0e, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("root metadata slab, inlined child map 
of different type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + childMapTypeInfo3 := testTypeInfo{45} + childMapTypeInfo4 := testTypeInfo{46} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + + var ti TypeInfo + switch i % 4 { + case 0: + ti = childMapTypeInfo1 + case 1: + ti = childMapTypeInfo2 + case 2: + ti = childMapTypeInfo3 + case 3: + ti = childMapTypeInfo4 + } + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) + require.NoError(t, err) + + ck := Uint64Value(i) + cv := Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: cv} + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9 + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version + 0x10, + // flag: root + map metadata + 0x89, + + // slab extra data + // CBOR encoded array of 
3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xda, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xda, + }, + id2: { + // version, flag: has inlined slab, has next slab ID + 0x13, + // flag: map data + 0x08, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2d, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // next slab ID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + 
// elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: "c" + 0x61, 0x63, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: 4 + 0xd8, 0xa4, 0x04, + + // element 4: + 0x82, + // key: "d" + 0x61, 0x64, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + + id3: { + // version, flag: has inlined slab + 0x11, + // flag: map data + 0x08, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2d, + // count: 1 + 0x01, + // seed + 0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "e" + 0x61, 0x65, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x06, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 4 + 0xd8, 0xa4, 0x04, + // value: 8 + 0xd8, 0xa4, 0x08, + + // element 1: + 0x82, + // key: "f" + 0x61, 0x66, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 5 + 0xd8, 0xa4, 0x05, + // value: 10 + 0xd8, 0xa4, 0x0a, + + // element 3: + 0x82, + // key: "g" + 0x61, 0x67, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 6 + 0xd8, 0xa4, 0x06, + // value: 12 + 0xd8, 0xa4, 0x0c, + + // element 4: + 0x82, + // key: "h" + 0x61, 0x68, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x03, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 7 + 0xd8, 0xa4, 0x07, + // value: 14 + 0xd8, 0xa4, 0x0e, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + 
require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("inline collision 1 level", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 4), Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 2 elements) + 0x99, 0x00, 0x04, + + // inline collision group corresponding to hkey 0 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + + // inline collision group corresponding to hkey 1 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + + // inline collision group corresponding to hkey 2 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + + // elements (array of 2 
elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + + // inline collision group corresponding to hkey 3 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("inline collision 2 levels", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 4), Digest(i % 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: 
digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + 0x99, 0x00, 0x04, + + // inline collision group corresponding to hkey 0 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [0, 0] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 
0xa4, 0x04, 0xd8, 0xa4, 0x08, + + // inline collision group corresponding to hkey 1 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 elements) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [1, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + + // inline collision group corresponding to hkey 2 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 element) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [2, 0] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 element) + 0x99, 0x00, 0x02, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + + // inline collision group corresponding to hkey 3 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // 
hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 element) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [3, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 element) + 0x99, 0x00, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("external collision", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 20 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 2), Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + 
id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + has pointer + map data + 0xc8, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 10 + 0x14, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + 0x99, 0x00, 0x02, + + // external collision group corresponding to hkey 0 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // external collision group corresponding to hkey 1 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // external collision group + id2: { + // version + 0x10, + // flag: any size + collision group + 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x59, 0x00, 0x50, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 8 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + // 
hkey: 12 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, + // hkey: 14 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + // hkey: 16 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 18 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x0a, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + // element: [uint64(8), uint64(16)] + 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, + // element: [uint64(10), uint64(20)] + 0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, + // element: [uint64(12), uint64(24)] + 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, + // element: [uint64(14), uint64(28)] + 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, + // element: [uint64(16), uint64(32)] + 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, + // element: [uint64(18), uint64(36)] + 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, + }, + + // external collision group + id3: { + // version + 0x10, + // flag: any size + collision group + 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x59, 0x00, 0x50, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // hkey: 9 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // hkey: 11 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + // hkey: 13 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + // hkey: 15 + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x0f, + // hkey: 17 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + // hkey: 19 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x0a, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + // element: [uint64(9), uint64(18)] + 0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12, + // element: [uint64(11), uint64(22))] + 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, + // element: [uint64(13), uint64(26)] + 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, + // element: [uint64(15), uint64(30)] + 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, + // element: [uint64(17), uint64(34)] + 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, + // element: [uint64(19), uint64(38)] + 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to child map", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, 
typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child map + typeInfo2 := testTypeInfo{43} + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo2) + require.NoError(t, err) + + expectedChildMapValues := mapValue{} + for i := 0; i < 2; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat("b", 22)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[k] = v + } + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childMap + keyValues[k] = expectedChildMapValues + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child map + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // root slab (data slab) ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // child map slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version + 0x10, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 
0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,2)] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + // map data slab + id2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2b, + // count + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey + 0x4f, 0x6a, 0x3e, 0x93, 0xdd, 0xb1, 0xbe, 0x5, + // hkey + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [1:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0xd8, 0xa4, 0x1, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 
0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [0:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0xd8, 0xa4, 0x0, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to grand child map", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize-1; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Create child map + childTypeInfo := testTypeInfo{43} + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childTypeInfo) + require.NoError(t, err) + + // Create grand child map + gchildTypeInfo := testTypeInfo{44} + + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gchildTypeInfo) + require.NoError(t, err) + + expectedGChildMapValues := mapValue{} + r 
:= 'a' + for i := 0; i < 2; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedGChildMapValues[k] = v + + r++ + } + + // Insert grand child map to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, Uint64Value(0), gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(mapSize - 1) + v := childMap + keyValues[k] = mapValue{Uint64Value(0): expectedGChildMapValues} + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child map + existingStorable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // root slab (data slab) ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // grand child map slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version, flag: has inlined slab + 0x11, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x81, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + 
+ // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [0:0] + 0x82, + 0xd8, 0xa4, 0x0, + 0xd8, 0xa4, 0x0, + // element: [1:inlined map] + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: SlabID{...3} + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // map data slab + id2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2c, + // count + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0xa, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey + 0x30, 0x43, 0xc5, 0x14, 0x8f, 0x52, 0x18, 0x43, + // hkey + 0x98, 0x0f, 0x5c, 0xdb, 0x37, 0x71, 0x6c, 0x13, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 
0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to child array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child array + const childArraySize = 5 + + typeInfo2 := testTypeInfo{43} + + childArray, err := NewArray(storage, address, typeInfo2) + 
require.NoError(t, err) + + expectedChildValues := make([]Value, childArraySize) + for i := 0; i < childArraySize; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = childArray.Append(v) + require.NoError(t, err) + + expectedChildValues[i] = v + } + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childArray + + keyValues[k] = arrayValue(expectedChildValues) + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert nested array + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // metadata slab + id1: { + // version + 0x10, + // flag: root + map meta + 0x89, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf6, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xf2, + }, + + // data slab + id2: { + // version + 0x12, + // flag: map data + 0x08, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + 
// the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + id3: { + // 
version + 0x10, + // flag: has pointer + map data + 0x48, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x04, + }, + + // array data slab + id4: { + // version + 0x10, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 elements) + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + require.Equal(t, expected[id4], stored[id4]) + + // Verify slab size in header is correct. 
+ meta, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) + require.Equal(t, 2, len(meta.childrenHeaders)) + require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) + require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to grand child array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child array + childTypeInfo := testTypeInfo{43} + + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) + + // Create grand child array + const gchildArraySize = 5 + + gchildTypeInfo := testTypeInfo{44} + + gchildArray, err := NewArray(storage, address, gchildTypeInfo) + require.NoError(t, err) + + expectedGChildValues := make([]Value, gchildArraySize) + for i := 0; i < gchildArraySize; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = gchildArray.Append(v) + require.NoError(t, err) + + 
expectedGChildValues[i] = v + } + + // Insert grand child array to child array + err = childArray.Append(gchildArray) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childArray + + keyValues[k] = arrayValue{arrayValue(expectedGChildValues)} + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child array to parent map + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // parent map root slab ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // grand child array root slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version, flag: has inlined slab + 0x11, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x81, + // element 0 + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 
0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:inlined array] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + + // value: inlined array (tag: CBORTagInlinedArray) + 0xd8, 0xfa, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined array elements (1 element) + 0x99, 0x00, 0x01, + // SlabID{...3} + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // grand array data slab + id2: { + // version + 0x10, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 elements) + 0x81, + // type info + 0x18, 0x2c, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) 
+ require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to storable slab", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := Uint64Value(0) + v := Uint64Value(0) + + digests := []Digest{Digest(0), Digest(1)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedNoPointer := []byte{ + + // version + 0x10, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 10 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x01, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + } + + // Verify encoded data + stored, err := 
storage.Encode() + require.NoError(t, err) + require.Equal(t, 1, len(stored)) + require.Equal(t, expectedNoPointer, stored[id1]) + + // Overwrite existing value with long string + vs := NewStringValue(strings.Repeat("a", 128)) + existingStorable, err = m.Set(compare, hashInputProvider, k, vs) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, v, existingValue) + + expectedHasPointer := []byte{ + + // version + 0x10, + // flag: root + pointer + map data + 0xc8, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 10 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x01, + // element: [uint64(0), slab id] + 0x82, 0xd8, 0xa4, 0x00, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + } + + expectedStorableSlab := []byte{ + // version + 0x10, + // flag: storable + no size limit + 0x3f, + // "aaaa..." 
+ 0x78, 0x80, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + } + + stored, err = storage.Encode() + require.NoError(t, err) + require.Equal(t, 2, len(stored)) + require.Equal(t, expectedHasPointer, stored[id1]) + require.Equal(t, expectedStorableSlab, stored[id2]) + }) + + t.Run("same composite with one field", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + + // Create child map, composite with one field "uuid" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := NewStringValue("uuid") + cv := Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(i) + + digesterBuilder.On("Digest", 
k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = mapValue{ck: cv} + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x48, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["uuid"] + 0x81, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined 
composite elements (array of 1 elements) + 0x81, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 1 elements) + 0x81, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with two fields (same order)", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + expectedChildMapVaues := mapValue{} + + // Create child map, composite with one field "uuid" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := NewStringValue("uuid") + cv := Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapVaues[ck] = cv + + ck = NewStringValue("amount") + 
cv = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapVaues[ck] = cv + + k := Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = expectedChildMapVaues + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, 0x3b, 0xef, 0x5b, 0xe2, 0x9b, 0x8d, 0xf9, 0x65, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["amount", "uuid"] + 0x82, 0x66, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // 0x99, 0x0, 0x2, 0x82, 0xd8, 
0xa4, 0x0, 0xd8, 0xfc, 0x83, 0x18, 0x0, 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x82, 0xd8, 0xa4, 0x0, 0xd8, 0xa4, 0x0, 0x82, 0xd8, 0xa4, 0x1, 0xd8, 0xfc, 0x83, 0x18, 0x0, 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x82, 0xd8, 0xa4, 0x2, 0xd8, 0xa4, 0x1 + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with two fields (different order)", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + 
digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed. + for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues := mapValue{} + + // Create child map, composite with one field "uuid" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := NewStringValue("uuid") + cv := Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + ck = NewStringValue("a") + cv = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + k := Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = expectedChildMapValues + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of 
inlined extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // 0x99, 0x0, 0x2, 0x82, 0xd8, 0xa4, 0x0, 0xd8, 0xfc, 0x83, 0x18, 0x0, 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, 0x82, 0xd8, 0xa4, 0x0, 0xd8, 0xa4, 0x0, 0x82, 0xd8, 0xa4, 0x1, 0xd8, 0xfc, 0x83, 0x18, 0x0, 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3, 0x82, 0xd8, 0xa4, 0x2, 0xd8, 0xa4, 0x1 + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 
0x02, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with different fields", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 3 + keyValues := make(map[Value]Value, mapSize) + + for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues := mapValue{} + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := NewStringValue("uuid") + cv := Uint64Value(i) + + // Insert first element "uuid" to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + // Insert second element to child map (second element is different) + switch i % 3 { + case 0: + ck = NewStringValue("a") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + + case 1: + ck = NewStringValue("b") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + + case 2: + ck = NewStringValue("c") + cv = Uint64Value(i * 2) + existingStorable, err = childMap.Set(compare, 
hashInputProvider, ck, cv) + } + + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + k := Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = expectedChildMapValues + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 3 + 0x03, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0xd8, 0xf6, 0x18, 0x2b, + // element 1: array of inlined extra data + 0x83, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + + // element 1 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 2 + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0xa, + // composite digests + 0x50, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + 0x82, 0x41, 0xee, 0xef, 0xc7, 0xb3, 0x2f, 0x28, + // composite keys ["uuid", "b"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x62, + + // 
element 2 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 2 + 0x02, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // composite digests + 0x50, + 0x5a, 0x98, 0x80, 0xf4, 0xa6, 0x52, 0x9e, 0x2d, + 0x6d, 0x8a, 0x0a, 0xe7, 0x19, 0xf1, 0xbb, 0x8b, + // composite keys ["uuid", "c"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x63, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 3) + 0x59, 0x00, 0x18, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // elements (array of 3 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x03, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 1 + 0xd8, 0xa4, 0x01, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 2: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab 
index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 4 + 0xd8, 0xa4, 0x04, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with different number of fields", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed. 
+ for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues := mapValue{} + + // Create child map, composite with one field "uuid" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + ck := NewStringValue("uuid") + cv := Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + if i == 0 { + ck = NewStringValue("a") + cv = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + } + + k := Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = expectedChildMapValues + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x81, + 0xd8, 0xf6, 0x18, 0x2b, + // element 1: array of inlined extra data + 0x82, + // element 0 + // inlined map extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + 
// composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + // element 0 + // inlined map extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, + 0x00, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // composite digests + 0x48, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + // composite keys ["uuid"] + 0x81, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x81, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], 
stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("different composite", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testCompositeTypeInfo{43} + childMapTypeInfo2 := testCompositeTypeInfo{44} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 4 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed. + for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues := mapValue{} + + var ti TypeInfo + if i%2 == 0 { + ti = childMapTypeInfo1 + } else { + ti = childMapTypeInfo2 + } + + // Create child map, composite with two field "uuid" and "a" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) + require.NoError(t, err) + + ck := NewStringValue("uuid") + cv := Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + ck = NewStringValue("a") + cv = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, ck, cv) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildMapValues[ck] = cv + + k := Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + 
require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = expectedChildMapValues + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 4 + 0x04, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // inlined extra data + 0x82, + // element 0: array of inlined type info + 0x80, + // element 1: array of inlined extra data + 0x82, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + // element 1 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2c, + // count: 2 + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // composite digests + 0x50, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + 0xea, 0x8e, 0x6f, 0x69, 0x81, 0x19, 0x68, 0x81, + // composite keys ["uuid", "a"] + 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x61, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 1 + 0xd8, 0xa4, 0x01, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 2: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 4 + 0xd8, 0xa4, 0x04, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: inlined composite (tag: CBORTagInlinedCompactMap) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + 
require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) +} + +func TestMapEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Create a new storage with encoded data from base storage + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Create new map from new storage + m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder) + require.NoError(t, err) + + testMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) +} + +func TestMapStoredValue(t *testing.T) { + + const mapSize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + keyValues := make(map[Value]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + keyValues[k] = Uint64Value(i) + i++ + } + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + rootID := m.SlabID() + + slabIterator, err := storage.SlabIterator() + require.NoError(t, err) + + for { + id, slab := slabIterator() + + if id == SlabIDUndefined { + break + } + + value, err := slab.StoredValue(storage) + + if id == rootID { + 
require.NoError(t, err) + + m2, ok := value.(*OrderedMap) + require.True(t, ok) + + testMap(t, storage, typeInfo, address, m2, keyValues, nil, false) + } else { + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var notValueError *NotValueError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, ¬ValueError) + require.ErrorAs(t, fatalError, ¬ValueError) + require.Nil(t, value) + } + } +} + +func TestMapPopIterate(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + err = storage.Commit() + require.NoError(t, err) + + require.Equal(t, 1, storage.Count()) + + i := uint64(0) + err = m.PopIterate(func(k Storable, v Storable) { + i++ + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + + testEmptyMap(t, storage, typeInfo, address, m) + }) + + t.Run("root-dataslab", func(t *testing.T) { + const mapSize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + key, value := Uint64Value(i), Uint64Value(i*10) + sortedKeys[i] = key + keyValues[key] = value + + existingStorable, err := m.Set(compare, hashInputProvider, key, value) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + err = storage.Commit() + require.NoError(t, err) + + require.Equal(t, 1, storage.Count()) + + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + i := mapSize + err = m.PopIterate(func(k, v Storable) { + i-- + + kv, 
err := k.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, sortedKeys[i], kv) + + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, keyValues[sortedKeys[i]], vv) + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testEmptyMap(t, storage, typeInfo, address, m) + }) + + t.Run("root-metaslab", func(t *testing.T) { + const mapSize = 4096 + + r := newRand(t) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + if _, found := keyValues[k]; !found { + sortedKeys[i] = k + keyValues[k] = NewStringValue(randStr(r, 16)) + i++ + } + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + digesterBuilder := newBasicDigesterBuilder() + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + err = storage.Commit() + require.NoError(t, err) + + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + // Iterate key value pairs + i = len(keyValues) + err = m.PopIterate(func(k Storable, v Storable) { + i-- + + kv, err := k.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, sortedKeys[i], kv) + + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, keyValues[sortedKeys[i]], vv) + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testEmptyMap(t, storage, typeInfo, address, m) + }) + + t.Run("collision", func(t *testing.T) { + //MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := &mockDigesterBuilder{} + 
storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + + if _, found := keyValues[k]; !found { + + sortedKeys[i] = k + keyValues[k] = NewStringValue(randStr(r, 16)) + + digests := []Digest{ + Digest(i % 100), + Digest(i % 5), + } + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k]) + require.NoError(t, err) + require.Nil(t, existingStorable) + + i++ + } + } + + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + err = storage.Commit() + require.NoError(t, err) + + // Iterate key value pairs + i = mapSize + err = m.PopIterate(func(k Storable, v Storable) { + i-- + + kv, err := k.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, sortedKeys[i], kv) + + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, keyValues[sortedKeys[i]], vv) + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + testEmptyMap(t, storage, typeInfo, address, m) + }) +} + +func TestEmptyMap(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := m.Get(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, s) + }) + + t.Run("remove", func(t *testing.T) { + existingMapKeyStorable, existingMapValueStorable, err := 
m.Remove(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, existingMapKeyStorable) + require.Nil(t, existingMapValueStorable) + }) + + t.Run("readonly iterate", func(t *testing.T) { + i := 0 + err := m.IterateReadOnly(func(k Value, v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) + + t.Run("iterate", func(t *testing.T) { + i := 0 + err := m.Iterate(compare, hashInputProvider, func(k Value, v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) + + t.Run("count", func(t *testing.T) { + count := m.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, m.Type())) + }) + + t.Run("address", func(t *testing.T) { + require.Equal(t, address, m.Address()) + }) + + // TestMapEncodeDecode/empty tests empty map encoding and decoding +} + +func TestMapFromBatchData(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. 
+ copied, err := NewMapFromBatchData( + storage, + address, + NewDefaultDigesterBuilder(), + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + testEmptyMap(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. 
+ copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + + k, v, err := iter.Next() + + // Save key value pair + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + 
Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 8 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + storable, err := m.Set( + compare, + hashInputProvider, + NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + 
NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + ) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + require.Equal(t, typeInfo, m.Type()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for m.Count() < mapSize { + k := randomValue(r, int(maxInlineMapElementSize)) + v := randomValue(r, int(maxInlineMapElementSize)) + + _, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + var sortedKeys []Value + keyValues := make(map[Value]Value, mapSize) + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := 
iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("collision", func(t *testing.T) { + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() + MaxCollisionLimitPerDigest = mapSize / 2 + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + + k, v := Uint64Value(i), Uint64Value(i*10) + + digests := make([]Digest, 2) + if i%2 == 0 { + digests[0] = 0 + } else { + digests[0] = Digest(i % (mapSize / 2)) + } + digests[1] = Digest(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + i := 0 + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + i++ + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + 
t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + maxStringSize := int(maxInlineMapKeySize - 2) + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}}) + + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue(randStr(r, maxStringSize)) + v = NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj") + v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ") + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + iter, err := m.ReadOnlyIterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + 
require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + testMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) +} + +func TestMapNestedStorables(t *testing.T) { + + t.Run("SomeValue", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + vs := strings.Repeat("b", int(i)) + v := SomeValue{Value: NewStringValue(vs)} + + keyValues[k] = v + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) + + t.Run("Array", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + // Create a child array with one element + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + vs := strings.Repeat("b", int(i)) + v := SomeValue{Value: NewStringValue(vs)} + + err = childArray.Append(v) + require.NoError(t, err) + + // Insert nested array into map + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + keyValues[k] = arrayValue{v} + + existingStorable, err := m.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) +} + +func 
TestMapMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + maxStringSize := int(maxInlineMapKeySize - 2) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for len(keyValues) < 2 { + // String length is maxInlineMapKeySize - 2 to account for string encoding overhead. + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + keyValues[k] = v + + _, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.True(t, m.root.IsData()) + + // Size of root data slab with two elements (key+value pairs) of + // max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab) + require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) +} + +func TestMapString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2]` + require.Equal(t, want, m.String()) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := 
newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` + require.Equal(t, want, m.String()) + }) +} + +func TestMapSlabDump(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := 
Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:221 firstKey:0} {id:0x102030405060708.3 size:293 firstKey:13}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:221 firstkey:0 elements: [0:0:0 1:1:1 2:2:2 3:3:3 4:4:4 5:5:5 6:6:6 7:7:7 8:8:8 9:9:9 10:10:10 11:11:11 12:12:12]", + "level 2, MapDataSlab id:0x102030405060708.3 size:293 firstkey:13 elements: [13:13:13 14:14:14 15:15:15 16:16:16 17:17:17 18:18:18 19:19:19 20:20:20 21:21:21 22:22:22 23:23:23 24:24:24 25:25:25 26:26:26 27:27:27 28:28:28 29:29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("inline collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: [0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", + "level 2, 
MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("external collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]", + "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]", + "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("key overflow", func(t *testing.T) { + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize))) + v := NewStringValue(strings.Repeat("b", 
int(maxInlineMapKeySize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]", + "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) - // map data slab - mapSlabID: { - // extra data - // version - 0x00, - // flag: root + has pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x14, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + t.Run("value overflow", func(t *testing.T) { - // version - 0x00, - // flag: root + has pointer + map data - 0xc8, + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // the following encoded data is valid CBOR + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // elements (array of 3 elements) - 0x83, + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) - // level: 0 - 0x00, + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // hkeys (byte string of length 8 * 2) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 
want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: [0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} - // elements (array of 2 elements) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, +func TestMaxCollisionLimitPerDigest(t *testing.T) { + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() - // external collision group corresponding to hkey 0 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + t.Run("collision limit 0", func(t *testing.T) { + const mapSize = 1024 - // external collision group corresponding to hkey 1 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - }, + SetThreshold(256) + defer SetThreshold(1024) - // external collision group - id2: { - // version - 0x00, - // flag: any size + collision group - 0x2b, - // next slab id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Set noncryptographic hash collision limit as 0, + // meaning no collision is allowed at first level. 
+ MaxCollisionLimitPerDigest = uint32(0) - // the following encoded data is valid CBOR + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v - // elements (array of 3 elements) - 0x83, + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } - // level: 1 - 0x01, + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - // hkeys (byte string of length 8 * 10) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - // hkey: 8 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 10 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, - // hkey: 12 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, - // hkey: 14 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, - // hkey: 16 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 18 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - // element: [uint64(8), uint64(16)] - 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, - // element: [uint64(10), uint64(20)] - 
0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, - // element: [uint64(12), uint64(24)] - 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, - // element: [uint64(14), uint64(28)] - 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, - // element: [uint64(16), uint64(32)] - 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, - // element: [uint64(18), uint64(36)] - 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, - }, + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - // external collision group - id3: { - // version - 0x00, - // flag: any size + collision group - 0x2b, - // next slab id - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) - // the following encoded data is valid CBOR + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v - // elements (array of 3 elements) - 0x83, + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } - // level: 1 - 0x01, + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, &collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } - // hkeys (byte string of length 8 * 10) - 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // 
hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - // hkey: 9 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - // hkey: 11 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, - // hkey: 13 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, - // hkey: 15 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, - // hkey: 17 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, - // hkey: 19 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, + // Verify that no new elements exceeding collision limit inserted + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - // element: [uint64(9), uint64(18)] - 0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12, - // element: [uint64(11), uint64(22))] - 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, - // element: [uint64(13), uint64(26)] - 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, - // element: [uint64(15), uint64(30)] - 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, - // element: [uint64(17), uint64(34)] - 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, - // element: [uint64(19), uint64(38)] - 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, - }, + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("collision 
limit > 0", func(t *testing.T) { + const mapSize = 1024 + + SetThreshold(256) + defer SetThreshold(1024) + + // Set noncryptographic hash collision limit as 7, + // meaning at most 8 elements in collision group per digest at first level. + MaxCollisionLimitPerDigest = uint32(7) + + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) } - // Decode data to new storage - storage := newTestPersistentStorageWithData(t, slabData) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - // Test new map from storage - decodedMap, err := NewMapWithRootID(storage, mapSlabID, digesterBuilder) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, decodedMap, keyValues, nil, false) + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, 
&collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } + + // Verify that no new elements exceeding collision limit inserted + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } -func TestMapEncodeDecode(t *testing.T) { +func TestMapLoadedValueIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) typeInfo := testTypeInfo{42} address := Address{1, 2, 3, 4, 5, 6, 7, 8} t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) - storage := newTestBasicStorage(t) + digesterBuilder := &mockDigesterBuilder{} - // Create map - m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - require.Equal(t, uint64(0), m.Count()) - - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - - expected := map[SlabID][]byte{ - id1: { - // version - 0x10, - // flag: root + map data - 0x88, - // extra data - // CBOR encoded array of 3 elements - 0x83, - // type info - 0x18, 0x2a, - // count: 0 - 0x00, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // parent map: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // the following encoded data is valid CBOR + testMapLoadedElements(t, m, nil) + }) - // elements (array of 3 elements) - 0x83, + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 0 - 0x00, + const mapSize = 3 + m, values := createMapWithSimpleValues( + t, + storage, + 
address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x00, + // parent map: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // elements (array of 0 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x00, - }, - } + testMapLoadedElements(t, m, values) + }) - // Verify encoded data - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, 1, len(stored)) - require.Equal(t, expected[id1], stored[id1]) + t.Run("root data slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, stored) + const mapSize = 3 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage2, id1, NewDefaultDigesterBuilder()) - require.NoError(t, err) + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyEmptyMap(t, storage2, typeInfo, address, decodedMap) + testMapLoadedElements(t, m, values) }) - t.Run("dataslab as root", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) - - // Create and populate map in memory - storage := newTestBasicStorage(t) - - digesterBuilder := &mockDigesterBuilder{} - - // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + t.Run("root data slab with composite values in collision group", func(t *testing.T) { + storage := newTestPersistentStorage(t) - const mapSize = 1 - keyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; 
i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) - keyValues[k] = v + // Create parent map with 3 collision groups, 2 elements in each group. + const mapSize = 6 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - digests := []Digest{Digest(i), Digest(i * 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + testMapLoadedElements(t, m, values) + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root data slab with composite values in external collision group", func(t *testing.T) { + storage := newTestPersistentStorage(t) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // Create parent map with 3 external collision group, 4 elements in the group. 
+ const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - // Expected serialized slab data with slab id - expected := map[SlabID][]byte{ + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - id1: { - // version - 0x10, - // flag: root + map data - 0x88, + testMapLoadedElements(t, m, values) + }) - // extra data - // CBOR encoded array of 3 elements - 0x83, - // type info - 0x18, 0x2a, - // count: 1 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // the following encoded data is valid CBOR + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // elements (array of 3 elements) - 0x83, + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // level: 0 - 0x00, + testMapLoadedElements(t, m, values) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Unload composite element from front to back. 
+ for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0):uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - }, + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) } + }) - // Verify encoded data - stored, err := storage.Encode() - require.NoError(t, err) + t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - require.Equal(t, len(expected), len(stored)) - require.Equal(t, expected[id1], stored[id1]) + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, stored) + // parent map: 1 root data slab + // long string keys: 1 storable slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) - require.NoError(t, err) + testMapLoadedElements(t, m, values) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) - }) + // Unload external key from front to back. + for i := 0; i < len(values); i++ { + k := values[i][0] - t.Run("has pointer no collision", func(t *testing.T) { + s, ok := k.(StringValue) + require.True(t, ok) - SetThreshold(256) - defer SetThreshold(1024) + // Find storage id for StringValue s. 
+ var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } + } - // Create and populate map in memory - storage := newTestBasicStorage(t) + require.NoError(t, keyID.Valid()) - digesterBuilder := &mockDigesterBuilder{} + err := storage.Remove(keyID) + require.NoError(t, err) - // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) - const mapSize = 8 - keyValues := make(map[Value]Value, mapSize) - r := 'a' - for i := uint64(0); i < mapSize-1; i++ { - k := NewStringValue(strings.Repeat(string(r), 22)) - v := NewStringValue(strings.Repeat(string(r), 22)) - keyValues[k] = v + t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - digests := []Digest{Digest(i), Digest(i * 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Create parent map with 3 collision groups, 2 elements in each group. + const mapSize = 6 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite element from front to back. 
+ for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) - require.Nil(t, existingStorable) - r++ + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) } + }) - // Create nested array - typeInfo2 := testTypeInfo{43} + t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - nested, err := NewArray(storage, address, typeInfo2) - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - err = nested.Append(Uint64Value(0)) - require.NoError(t, err) + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - k := NewStringValue(strings.Repeat(string(r), 22)) - v := nested - keyValues[k] = v + testMapLoadedElements(t, m, values) - digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) - // Insert nested array - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - id1 
:= SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - // Expected serialized slab data with slab id - expected := map[SlabID][]byte{ + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // metadata slab - id1: { - // version - 0x10, - // flag: root + map meta - 0x89, + testMapLoadedElements(t, m, values) - // extra data - // CBOR encoded array of 3 elements - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // Unload external collision group slab from front to back - // child shared address - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - // child header count - 0x00, 0x02, - // child header 1 (slab id, first key, size) - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0xf6, - // child header 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - 0x00, 0xf2, - }, + sort.Slice(externalCollisionSlabIDs, func(i, j 
int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) - // data slab - id2: { - // version - 0x12, - // flag: map data - 0x08, - // next slab id - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + for i, id := range externalCollisionSlabIDs { + err := storage.Remove(id) + require.NoError(t, err) - // the following encoded data is valid CBOR + expectedValues := values[i*4+4:] + testMapLoadedElements(t, m, expectedValues) + } + }) - // elements (array of 3 elements) - 0x83, + t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 0 - 0x00, + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // elements (array of 4 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x04, - // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] - 0x82, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 
0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] - 0x82, - 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, - 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, - // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] - 0x82, - 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, - // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] - 0x82, - 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, - 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, - }, + testMapLoadedElements(t, m, values) + + // Unload composite element from back to front. 
+ for i := len(values) - 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) - // data slab - id3: { - // version - 0x10, - // flag: has pointer + map data - 0x48, + t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // the following encoded data is valid CBOR + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) - // elements (array of 3 elements) - 0x83, + // parent map: 1 root data slab + // long string keys: 1 storable slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // level: 0 - 0x00, + testMapLoadedElements(t, m, values) - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // Unload external key from back to front. 
+ for i := len(values) - 1; i >= 0; i-- { + k := values[i][0] - // elements (array of 4 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x04, - // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] - 0x82, - 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, - 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, - // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] - 0x82, - 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, - // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] - 0x82, - 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, - 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, - // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] - 0x82, - 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, - // array data slab - id4: { - // version - 0x10, - // flag: root + array data - 0x80, - // extra data (CBOR encoded array of 1 elements) - 0x81, - // type info - 0x18, 0x2b, + s, ok := k.(StringValue) + require.True(t, ok) - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, - }, - } + // Find storage id for StringValue s. 
+ var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } + } - // Verify encoded data - stored, err := storage.Encode() - require.NoError(t, err) + require.NoError(t, keyID.Valid()) - require.Equal(t, len(expected), len(stored)) - require.Equal(t, expected[id1], stored[id1]) - require.Equal(t, expected[id2], stored[id2]) - require.Equal(t, expected[id3], stored[id3]) - require.Equal(t, expected[id4], stored[id4]) + err := storage.Remove(keyID) + require.NoError(t, err) - // Verify slab size in header is correct. - meta, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) - require.Equal(t, 2, len(meta.childrenHeaders)) - require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) - // Need to add slabIDSize to encoded data slab here because empty slab ID is omitted during encoding. - require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, stored) + t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) - require.NoError(t, err) + // Create parent map with 3 collision groups, 2 elements in each group. 
+ const mapSize = 6 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) - }) + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - t.Run("inline collision 1 level", func(t *testing.T) { + testMapLoadedElements(t, m, values) - SetThreshold(256) - defer SetThreshold(1024) + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) - // Create and populate map in memory - storage := newTestBasicStorage(t) + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) + } + }) - digesterBuilder := &mockDigesterBuilder{} + t.Run("root data slab with composite values in external collision group, unload value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - const mapSize = 8 - keyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - digests := []Digest{Digest(i % 4), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + testMapLoadedElements(t, m, values) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) require.NoError(t, err) - require.Nil(t, existingStorable) - keyValues[k] = v + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) } + }) - require.Equal(t, uint64(mapSize), m.Count()) - - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Expected serialized slab data with slab id - expected := map[SlabID][]byte{ + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - // map metadata slab - id1: { - // version - 0x10, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // the following encoded data is valid CBOR + testMapLoadedElements(t, m, values) - // elements (array of 3 elements) - 0x83, + // Unload external slabs from back to front + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - // level: 0 - 0x00, + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- { + err := storage.Remove(externalCollisionSlabIDs[i]) + require.NoError(t, err) - // elements (array of 2 elements) - 0x99, 0x00, 0x04, + expectedValues := values[:i*4] + 
testMapLoadedElements(t, m, expectedValues) + } + }) - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 1 - 0x01, + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + testMapLoadedElements(t, m, values) - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + // Unload value in the middle + unloadValueIndex := 1 - // level: 1 - 0x01, + err := storage.Remove(childSlabIDs[unloadValueIndex]) + require.NoError(t, err) - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 
0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + testMapLoadedElements(t, m, values) + }) - // inline collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 1 - 0x01, + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + testMapLoadedElements(t, m, values) - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + // Unload key in the middle. 
+ unloadValueIndex := 1 - // level: 1 - 0x01, + k := values[unloadValueIndex][0] - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + s, ok := k.(StringValue) + require.True(t, ok) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - }, + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } } - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, len(expected), len(stored)) - require.Equal(t, expected[id1], stored[id1]) - - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, stored) + require.NoError(t, keyID.Valid()) - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + err := storage.Remove(keyID) require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + testMapLoadedElements(t, m, values) }) - t.Run("inline collision 2 levels", func(t *testing.T) { + t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - SetThreshold(256) - defer SetThreshold(1024) + // Create parent map with 3 collision groups, 2 elements in each group. 
+ const mapSize = 6 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - // Create and populate map in memory - storage := newTestBasicStorage(t) + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - digesterBuilder := &mockDigesterBuilder{} + testMapLoadedElements(t, m, values) - // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Unload composite element in the middle + for _, unloadValueIndex := range []int{1, 3, 5} { + err := storage.Remove(childSlabIDs[unloadValueIndex]) + require.NoError(t, err) + } - const mapSize = 8 - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + } + testMapLoadedElements(t, m, expectedValues) + }) - digests := []Digest{Digest(i % 4), Digest(i % 2)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + testMapLoadedElements(t, m, values) + + // Unload composite value in the middle. + for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { + err := storage.Remove(childSlabIDs[unloadValueIndex]) require.NoError(t, err) - require.Nil(t, existingStorable) + } - keyValues[k] = v + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + values[6], + values[8], + values[10], } + testMapLoadedElements(t, m, expectedValues) + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - // Expected serialized slab data with slab id - expected := map[SlabID][]byte{ + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // map data slab - id1: { - // version - 0x10, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2a, - // count: 8 - 0x08, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + testMapLoadedElements(t, m, values) + + // Unload external slabs in the middle. + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) + + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) + + id := externalCollisionSlabIDs[1] + err := storage.Remove(id) + require.NoError(t, err) + + copy(values[4:], values[8:]) + values = values[:8] - // the following encoded data is valid CBOR + testMapLoadedElements(t, m, values) + }) - // elements (array of 3 elements) - 0x83, + t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 0 - 0x00, + const mapSize = 3 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + 
func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // elements (array of 4 elements) - 0x99, 0x00, 0x04, + testMapLoadedElements(t, m, values) - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + i := 0 + err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) { + // At this point, iterator returned first element (v). - // level 1 - 0x01, + // Remove all other nested composite elements (except first element) from storage. + for _, slabID := range childSlabIDs[1:] { + err := storage.Remove(slabID) + require.NoError(t, err) + } - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + require.Equal(t, 0, i) + valueEqual(t, values[0][0], k) + valueEqual(t, values[0][1], v) + i++ + return true, nil + }) - // elements (array of 1 elements) - 0x99, 0x00, 0x01, + require.NoError(t, err) + require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. 
+ }) - // inline collision group corresponding to hkey [0, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) { + const mapSize = 3 - // level: 2 - 0x02, + // Create a map with nested composite value at specified index + for childArrayIndex := 0; childArrayIndex < mapSize; childArrayIndex++ { + storage := newTestPersistentStorage(t) - // hkeys (empty byte string) - 0x40, + m, values, childSlabID := createMapWithSimpleAndChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + childArrayIndex, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + // parent map: 1 root data slab + // composite element: 1 root data slab + require.Equal(t, 2, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + testMapLoadedElements(t, m, values) - // level: 1 - 0x01, + // Unload composite value + err := storage.Remove(childSlabID) + require.NoError(t, err) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] - // elements (array of 1 elements) - 0x99, 0x00, 0x01, + testMapLoadedElements(t, m, values) + } + }) - // inline collision group corresponding to hkey [1, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + 
t.Run("root metadata slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 2 - 0x02, + const mapSize = 20 + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // hkeys (empty byte string) - 0x40, + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + testMapLoadedElements(t, m, values) + }) - // inline collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + t.Run("root metadata slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 1 - 0x01, + const mapSize = 20 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // elements (array of 1 element) - 0x99, 0x00, 0x01, + testMapLoadedElements(t, m, values) + }) - // inline collision group corresponding to hkey [2, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + t.Run("root metadata slab with composite values, unload value from front to 
back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 2 - 0x02, + const mapSize = 20 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // hkeys (empty byte string) - 0x40, + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values : 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // elements (array of 2 element) - 0x99, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + testMapLoadedElements(t, m, values) - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) - // level: 1 - 0x01, + expectedValues := values[i+1:] + testMapLoadedElements(t, m, expectedValues) + } + }) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // elements (array of 1 element) - 0x99, 0x00, 0x01, + const mapSize = 20 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // inline collision group corresponding to hkey [3, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data 
slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // level: 2 - 0x02, + testMapLoadedElements(t, m, values) - // hkeys (empty byte string) - 0x40, + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) - // elements (array of 2 element) - 0x99, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - }, + expectedValues := values[:i] + testMapLoadedElements(t, m, expectedValues) } + }) + + t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 20 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, len(expected), len(stored)) - require.Equal(t, expected[id1], stored[id1]) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, stored) + testMapLoadedElements(t, m, values) - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) - require.NoError(t, err) + // Unload composite element in the middle + for _, index := range []int{4, 14} { + err := storage.Remove(childSlabIDs[index]) + require.NoError(t, err) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) - }) + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] - t.Run("external collision", func(t 
*testing.T) { + copy(childSlabIDs[index:], childSlabIDs[index+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] - SetThreshold(256) - defer SetThreshold(1024) + testMapLoadedElements(t, m, values) + } + }) - // Create and populate map in memory - storage := newTestBasicStorage(t) + t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { + const mapSize = 20 - digesterBuilder := &mockDigesterBuilder{} + // Create a map with nested composite value at specified index + for childArrayIndex := 0; childArrayIndex < mapSize; childArrayIndex++ { + storage := newTestPersistentStorage(t) - // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + m, values, childSlabID := createMapWithSimpleAndChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + childArrayIndex, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - const mapSize = 20 - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 5, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - digests := []Digest{Digest(i % 2), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + testMapLoadedElements(t, m, values) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + err := storage.Remove(childSlabID) require.NoError(t, err) - require.Nil(t, existingStorable) - keyValues[k] = v + copy(values[childArrayIndex:], values[childArrayIndex+1:]) + values = values[:len(values)-1] + + testMapLoadedElements(t, m, values) } + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - id1 := SlabID{address: address, index: 
SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + const mapSize = 20 - // Expected serialized slab data with slab id - expected := map[SlabID][]byte{ + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // map data slab - id1: { - // version - 0x10, - // flag: root + has pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x14, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // the following encoded data is valid CBOR + testMapLoadedElements(t, m, values) - // elements (array of 3 elements) - 0x83, + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - // level: 0 - 0x00, + // Unload data slabs from front to back + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + childHeader := rootMetaDataSlab.childrenHeaders[i] - // elements (array of 2 elements) - 0x99, 0x00, 0x02, + // Get data slab element count before unload it from storage. + // Element count isn't in the header. 
+ mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) + require.True(t, ok) - // external collision group corresponding to hkey 0 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + count := mapDataSlab.elements.Count() - // external collision group corresponding to hkey 1 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - }, + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - // external collision group - id2: { - // version - 0x10, - // flag: any size + collision group - 0x2b, + values = values[count:] - // the following encoded data is valid CBOR + testMapLoadedElements(t, m, values) + } + }) - // elements (array of 3 elements) - 0x83, + t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // level: 1 - 0x01, + const mapSize = 20 - // hkeys (byte string of length 8 * 10) - 0x59, 0x00, 0x50, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - // hkey: 8 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 10 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, - // hkey: 12 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, - // hkey: 14 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, - // hkey: 16 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 18 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return 
[]Digest{Digest(i)} }, + ) - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x0a, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - // element: [uint64(8), uint64(16)] - 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, - // element: [uint64(10), uint64(20)] - 0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, - // element: [uint64(12), uint64(24)] - 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, - // element: [uint64(14), uint64(28)] - 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, - // element: [uint64(16), uint64(32)] - 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, - // element: [uint64(18), uint64(36)] - 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, - }, + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - // external collision group - id3: { - // version - 0x10, - // flag: any size + collision group - 0x2b, + testMapLoadedElements(t, m, values) - // the following encoded data is valid CBOR + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - // elements (array of 3 elements) - 0x83, + // Unload data slabs from back to front + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { - // level: 1 - 0x01, + childHeader := rootMetaDataSlab.childrenHeaders[i] - // hkeys (byte string of length 8 * 10) - 0x59, 0x00, 0x50, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - // hkey: 9 - 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - // hkey: 11 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, - // hkey: 13 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, - // hkey: 15 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, - // hkey: 17 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, - // hkey: 19 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, + // Get data slab element count before unload it from storage + // Element count isn't in the header. + mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) + require.True(t, ok) + + count := mapDataSlab.elements.Count() + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x0a, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - // element: [uint64(9), uint64(18)] - 0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12, - // element: [uint64(11), uint64(22))] - 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, - // element: [uint64(13), uint64(26)] - 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, - // element: [uint64(15), uint64(30)] - 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, - // element: [uint64(17), uint64(34)] - 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, - // element: [uint64(19), uint64(38)] - 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, - }, + values = values[:len(values)-int(count)] + + testMapLoadedElements(t, m, values) } + }) - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, len(expected), len(stored)) - require.Equal(t, expected[id1], stored[id1]) - require.Equal(t, expected[id2], stored[id2]) - require.Equal(t, expected[id3], stored[id3]) + 
t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Decode data to new storage - storage2 := newTestPersistentStorageWithData(t, stored) + const mapSize = 20 - // Test new map from storage2 - decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) - require.NoError(t, err) + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) - }) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - t.Run("pointer", func(t *testing.T) { - // Create and populate map in memory - storage := newTestBasicStorage(t) + testMapLoadedElements(t, m, values) - digesterBuilder := &mockDigesterBuilder{} + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2) - k := Uint64Value(0) - v := Uint64Value(0) + index := 1 + childHeader := rootMetaDataSlab.childrenHeaders[index] - digests := []Digest{Digest(0), Digest(1)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Get element count from previous data slab + mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + countAtIndex0 := mapDataSlab.elements.Count() + + // Get element count from slab to be unloaded + mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab) + require.True(t, ok) + + countAtIndex1 := mapDataSlab.elements.Count() + + err := storage.Remove(childHeader.slabID) require.NoError(t, err) - 
require.Nil(t, existingStorable) - require.Equal(t, uint64(1), m.Count()) + copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) + values = values[:m.Count()-uint64(countAtIndex1)] - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + testMapLoadedElements(t, m, values) + }) - expectedNoPointer := []byte{ + t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // version - 0x10, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + const mapSize = 200 - // the following encoded data is valid CBOR + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // elements (array of 3 elements) - 0x83, + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) - // level: 0 - 0x00, + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Unload non-root metadata slabs from front to back. + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + // Use firstKey to deduce number of elements in slab. 
+ var expectedValues [][2]Value + if i < len(rootMetaDataSlab.childrenHeaders)-1 { + nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1] + expectedValues = values[int(nextChildHeader.firstKey):] + } + + testMapLoadedElements(t, m, expectedValues) } + }) - // Verify encoded data - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, 1, len(stored)) - require.Equal(t, expectedNoPointer, stored[id1]) + t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Overwrite existing value with long string - vs := NewStringValue(strings.Repeat("a", 512)) - existingStorable, err = m.Set(compare, hashInputProvider, k, vs) - require.NoError(t, err) + const mapSize = 200 - existingValue, err := existingStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - expectedHasPointer := []byte{ + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) - // version - 0x10, - // flag: root + pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - // the following encoded data is valid CBOR + // Unload non-root metadata slabs from back to front. 
+ for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { - // elements (array of 3 elements) - 0x83, + childHeader := rootMetaDataSlab.childrenHeaders[i] - // level: 0 - 0x00, + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Use firstKey to deduce number of elements in slabs. + values = values[:childHeader.firstKey] - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), slab id] - 0x82, 0xd8, 0xa4, 0x00, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + testMapLoadedElements(t, m, values) } - - stored, err = storage.Encode() - require.NoError(t, err) - require.Equal(t, 2, len(stored)) - require.Equal(t, expectedHasPointer, stored[id1]) }) -} -func TestMapEncodeDecodeRandomValues(t *testing.T) { - - SetThreshold(256) - defer SetThreshold(1024) + t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) { - r := newRand(t) + storage := newTestPersistentStorage(t) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + const mapSize = 500 + m, values, childSlabIDs := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + 
testMapLoadedElements(t, m, values) - // Create a new storage with encoded data from base storage - storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + r := newRand(t) - // Create new map from new storage - m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder) - require.NoError(t, err) + // Unload composite element in random position + for len(values) > 0 { - verifyMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) -} + i := r.Intn(len(values)) -func TestMapStoredValue(t *testing.T) { + err := storage.Remove(childSlabIDs[i]) + require.NoError(t, err) - const mapSize = 4096 + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] - r := newRand(t) + copy(childSlabIDs[i:], childSlabIDs[i+1:]) + childSlabIDs = childSlabIDs[:len(childSlabIDs)-1] - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + testMapLoadedElements(t, m, values) + } + }) - keyValues := make(map[Value]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) - keyValues[k] = Uint64Value(i) - i++ - } + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + storage := newTestPersistentStorage(t) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + const mapSize = 500 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - rootID := m.SlabID() + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // composite values: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) 
- slabIterator, err := storage.SlabIterator() - require.NoError(t, err) + testMapLoadedElements(t, m, values) - for { - id, slab := slabIterator() + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - if id == SlabIDUndefined { - break + type slabInfo struct { + id SlabID + startIndex int + count int } - value, err := slab.StoredValue(storage) - - if id == rootID { - require.NoError(t, err) + var dataSlabInfos []*slabInfo + for _, mheader := range rootMetaDataSlab.childrenHeaders { - m2, ok := value.(*OrderedMap) + nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) require.True(t, ok) - verifyMap(t, storage, typeInfo, address, m2, keyValues, nil, false) - } else { - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var notValueError *NotValueError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, ¬ValueError) - require.ErrorAs(t, fatalError, ¬ValueError) - require.Nil(t, value) + for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ { + h := nonRootMetaDataSlab.childrenHeaders[i] + + if len(dataSlabInfos) > 0 { + // Update previous slabInfo.count + dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex + } + + dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)}) + } } - } -} -func TestMapPopIterate(t *testing.T) { + r := newRand(t) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := newBasicDigesterBuilder() + for len(dataSlabInfos) > 0 { + index := r.Intn(len(dataSlabInfos)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + slabToBeRemoved := dataSlabInfos[index] - err = storage.Commit() - require.NoError(t, err) + // Update startIndex for all subsequence data slabs + for i := index + 1; i < 
len(dataSlabInfos); i++ { + dataSlabInfos[i].startIndex -= slabToBeRemoved.count + } - require.Equal(t, 1, storage.Count()) + err := storage.Remove(slabToBeRemoved.id) + require.NoError(t, err) - i := uint64(0) - err = m.PopIterate(func(k Storable, v Storable) { - i++ - }) - require.NoError(t, err) - require.Equal(t, uint64(0), i) + if index == len(dataSlabInfos)-1 { + values = values[:slabToBeRemoved.startIndex] + } else { + copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:]) + values = values[:len(values)-slabToBeRemoved.count] + } + + copy(dataSlabInfos[index:], dataSlabInfos[index+1:]) + dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + + testMapLoadedElements(t, m, values) + } - verifyEmptyMap(t, storage, typeInfo, address, m) + require.Equal(t, 0, len(values)) }) - t.Run("root-dataslab", func(t *testing.T) { - const mapSize = 10 + t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { - typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := newBasicDigesterBuilder() - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + const mapSize = 500 + m, values, _ := createMapWithChildArrayValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - key, value := Uint64Value(i), Uint64Value(i*10) - sortedKeys[i] = key - keyValues[key] = value + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // composite values: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) - existingStorable, err := m.Set(compare, hashInputProvider, key, value) - require.NoError(t, err) - 
require.Nil(t, existingStorable) + testMapLoadedElements(t, m, values) + + type slabInfo struct { + id SlabID + startIndex int + count int + children []*slabInfo } - require.Equal(t, uint64(mapSize), m.Count()) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - err = storage.Commit() - require.NoError(t, err) + metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) + for i, mheader := range rootMetaDataSlab.childrenHeaders { - require.Equal(t, 1, storage.Count()) + if i > 0 { + prevMetaDataSlabInfo := metadataSlabInfos[i-1] + prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1] - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + // Update previous metadata slab count + prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex - i := mapSize - err = m.PopIterate(func(k, v Storable) { - i-- + // Update previous data slab count + prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex + } - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + metadataSlabInfo := &slabInfo{ + id: mheader.slabID, + startIndex: int(mheader.firstKey), + } - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) + require.True(t, ok) - require.NoError(t, err) - require.Equal(t, 0, i) + children := make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders)) + for i, h := range nonRootMetadataSlab.childrenHeaders { + children[i] = &slabInfo{ + id: h.slabID, + startIndex: int(h.firstKey), + } + if i > 0 { + children[i-1].count = int(h.firstKey) - children[i-1].startIndex + } + } - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + metadataSlabInfo.children = children + metadataSlabInfos[i] = metadataSlabInfo + } - t.Run("root-metaslab", 
func(t *testing.T) { - const mapSize = 4096 + const ( + metadataSlabType int = iota + dataSlabType + maxSlabType + ) r := newRand(t) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) - if _, found := keyValues[k]; !found { - sortedKeys[i] = k - keyValues[k] = NewStringValue(randStr(r, 16)) - i++ - } - } - - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) - digesterBuilder := newBasicDigesterBuilder() - - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + for len(metadataSlabInfos) > 0 { - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + var slabInfoToBeRemoved *slabInfo + var isLastSlab bool - err = storage.Commit() - require.NoError(t, err) + switch r.Intn(maxSlabType) { - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + case metadataSlabType: - // Iterate key value pairs - i = len(keyValues) - err = m.PopIterate(func(k Storable, v Storable) { - i-- + metadataSlabIndex := r.Intn(len(metadataSlabInfos)) - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1 - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex] - require.NoError(t, err) - require.Equal(t, 0, i) + count := slabInfoToBeRemoved.count - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + // Update startIndex for subsequence metadata slabs + for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { + metadataSlabInfos[i].startIndex -= count - t.Run("collision", func(t *testing.T) { - 
//MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + for j := 0; j < len(metadataSlabInfos[i].children); j++ { + metadataSlabInfos[i].children[j].startIndex -= count + } + } - const mapSize = 1024 + copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) + metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] - SetThreshold(512) - defer SetThreshold(1024) + case dataSlabType: - r := newRand(t) + metadataSlabIndex := r.Intn(len(metadataSlabInfos)) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := &mockDigesterBuilder{} - storage := newTestPersistentStorage(t) + metadataSlabInfo := metadataSlabInfos[metadataSlabIndex] - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + dataSlabIndex := r.Intn(len(metadataSlabInfo.children)) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) + isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) && + (dataSlabIndex == len(metadataSlabInfo.children)-1) - if _, found := keyValues[k]; !found { + slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex] - sortedKeys[i] = k - keyValues[k] = NewStringValue(randStr(r, 16)) + count := slabInfoToBeRemoved.count - digests := []Digest{ - Digest(i % 100), - Digest(i % 5), + // Update startIndex for all subsequence data slabs in this metadata slab info + for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ { + metadataSlabInfo.children[i].startIndex -= count } - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - - existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k]) - require.NoError(t, err) - require.Nil(t, existingStorable) + copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:]) + metadataSlabInfo.children = 
metadataSlabInfo.children[:len(metadataSlabInfo.children)-1] - i++ - } - } + metadataSlabInfo.count -= count - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + // Update startIndex for all subsequence metadata slabs. + for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { + metadataSlabInfos[i].startIndex -= count - err = storage.Commit() - require.NoError(t, err) + for j := 0; j < len(metadataSlabInfos[i].children); j++ { + metadataSlabInfos[i].children[j].startIndex -= count + } + } - // Iterate key value pairs - i = mapSize - err = m.PopIterate(func(k Storable, v Storable) { - i-- + if len(metadataSlabInfo.children) == 0 { + copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) + metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] + } + } - kv, err := k.StoredValue(storage) + err := storage.Remove(slabInfoToBeRemoved.id) require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + if isLastSlab { + values = values[:slabInfoToBeRemoved.startIndex] + } else { + copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) + values = values[:len(values)-slabInfoToBeRemoved.count] + } - require.NoError(t, err) - require.Equal(t, 0, i) + testMapLoadedElements(t, m, values) + } - verifyEmptyMap(t, storage, typeInfo, address, m) + require.Equal(t, 0, len(values)) }) } -func TestEmptyMap(t *testing.T) { - - t.Parallel() +func createMapWithLongStringKey( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, +) (*OrderedMap, [][2]Value) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := &mockDigesterBuilder{} - m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + 
// Create parent map. + m, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - t.Run("get", func(t *testing.T) { - s, err := m.Get(compare, hashInputProvider, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var keyNotFoundError *KeyNotFoundError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &keyNotFoundError) - require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, s) - }) + expectedValues := make([][2]Value, size) + r := 'a' + for i := 0; i < size; i++ { + s := strings.Repeat(string(r), int(maxInlineMapElementSize)) - t.Run("remove", func(t *testing.T) { - existingKey, existingValue, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var keyNotFoundError *KeyNotFoundError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &keyNotFoundError) - require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, existingKey) - require.Nil(t, existingValue) - }) + k := NewStringValue(s) + v := Uint64Value(i) - t.Run("iterate", func(t *testing.T) { - i := 0 - err := m.Iterate(func(k Value, v Value) (bool, error) { - i++ - return true, nil - }) - require.NoError(t, err) - require.Equal(t, 0, i) - }) + expectedValues[i] = [2]Value{k, v} - t.Run("count", func(t *testing.T) { - count := m.Count() - require.Equal(t, uint64(0), count) - }) + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - t.Run("type", func(t *testing.T) { - require.True(t, typeInfoComparator(typeInfo, m.Type())) - }) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - t.Run("address", func(t *testing.T) { - require.Equal(t, address, m.Address()) - }) + r++ + } - // TestMapEncodeDecode/empty tests empty map encoding and decoding + return m, expectedValues } -func 
TestMapFromBatchData(t *testing.T) { - - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) - require.Equal(t, uint64(0), m.Count()) - - iter, err := m.Iterator() - require.NoError(t, err) - - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} +func createMapWithSimpleValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - // Create a map with new storage, new address, and original map's elements. - copied, err := NewMapFromBatchData( - storage, - address, - NewDefaultDigesterBuilder(), - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - return iter.Next() - }) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), m.SlabID()) + digesterBuilder := &mockDigesterBuilder{} - verifyEmptyMap(t, storage, typeInfo, address, copied) - }) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - t.Run("root-dataslab", func(t *testing.T) { - SetThreshold(1024) + expectedValues := make([][2]Value, size) + r := rune('a') + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 20)) - const mapSize = 10 + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - typeInfo := testTypeInfo{42} + expectedValues[i] = [2]Value{k, v} - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) + existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1]) require.NoError(t, err) + require.Nil(t, existingStorable) + } - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, 
hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + return m, expectedValues +} - require.Equal(t, uint64(mapSize), m.Count()) +func createMapWithChildArrayValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value, []SlabID) { + const childArraySize = 50 - iter, err := m.Iterator() - require.NoError(t, err) + // Use mockDigesterBuilder to guarantee element order. + digesterBuilder := &mockDigesterBuilder{} - var sortedKeys []Value - keyValues := make(map[Value]Value) + // Create parent map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - storage := newTestPersistentStorage(t) - digesterBuilder := NewDefaultDigesterBuilder() - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + slabIDs := make([]SlabID, size) + expectedValues := make([][2]Value, size) + for i := 0; i < size; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // Create a map with new storage, new address, and original map's elements. 
- copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { + expectedChildValues := make([]Value, childArraySize) + for j := 0; j < childArraySize; j++ { + v := Uint64Value(j) - k, v, err := iter.Next() + err = childArray.Append(v) + require.NoError(t, err) - // Save key value pair - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + expectedChildValues[j] = v + } - return k, v, err - }) + k := Uint64Value(i) + v := childArray + + expectedValues[i] = [2]Value{k, arrayValue(expectedChildValues)} + slabIDs[i] = childArray.SlabID() + + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // Set child array to parent + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), m.SlabID()) + require.Nil(t, existingStorable) + } - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) + return m, expectedValues, slabIDs +} - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) +func createMapWithSimpleAndChildArrayValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + compositeValueIndex int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value, SlabID) { + const childArraySize = 50 - const mapSize = 4096 + digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} + // Create parent map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) + var slabID SlabID + values := make([][2]Value, size) + r := 'a' + for i := 0; i < size; i++ { - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, 
hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + k := Uint64Value(i) + + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + if compositeValueIndex == i { + // Create child array with one element + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - require.Nil(t, storable) - } - require.Equal(t, uint64(mapSize), m.Count()) + expectedChildValues := make([]Value, childArraySize) + for j := 0; j < childArraySize; j++ { + v := Uint64Value(j) + err = childArray.Append(v) + require.NoError(t, err) - iter, err := m.Iterator() - require.NoError(t, err) + expectedChildValues[j] = v + } - var sortedKeys []Value - keyValues := make(map[Value]Value) + values[i] = [2]Value{k, arrayValue(expectedChildValues)} - storage := newTestPersistentStorage(t) - digesterBuilder := NewDefaultDigesterBuilder() - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + existingStorable, err := m.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + slabID = childArray.SlabID() - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + } else { + v := NewStringValue(strings.Repeat(string(r), 18)) + values[i] = [2]Value{k, v} - return k, v, err - }) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + } - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + return m, values, slabID +} - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) +func testMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { + i := 0 + err := m.IterateReadOnlyLoadedValues(func(k Value, v Value) (bool, error) { + require.True(t, i < 
len(expectedValues)) + valueEqual(t, expectedValues[i][0], k) + valueEqual(t, expectedValues[i][1], v) + i++ + return true, nil }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} + +func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*MapMetaDataSlab); ok { + counter++ + } + } + return counter +} + +func TestMaxInlineMapValueSize(t *testing.T) { + + t.Run("small key", func(t *testing.T) { + // Value has larger max inline size when key is less than max map key size. - t.Run("rebalance two data slabs", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - const mapSize = 10 + mapSize := 2 + keyStringSize := 16 // Key size is less than max map key size. + valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size. + + r := newRand(t) + + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v + } typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) - require.Nil(t, storable) + require.Nil(t, existingStorable) } - k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) - storable, err := m.Set(compare, hashInputProvider, k, v) - 
require.NoError(t, err) - require.Nil(t, storable) + // Both key and value are stored in map slab. + require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, uint64(mapSize+1), m.Count()) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) - iter, err := m.Iterator() - require.NoError(t, err) + t.Run("max size key", func(t *testing.T) { + // Value max size is about half of max map element size when key is exactly max map key size. - var sortedKeys []Value - keyValues := make(map[Value]Value) + SetThreshold(256) + defer SetThreshold(1024) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() + mapSize := 1 + keyStringSize := maxInlineMapKeySize - 2 // Key size is exactly max map key size (2 bytes is string encoding overhead). + valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half). - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + r := newRand(t) - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v + } - return k, v, err - }) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + 
require.Nil(t, existingStorable) + } + + // Key is stored in map slab, while value is stored separately in storable slab. + require.Equal(t, 2, len(storage.deltas)) + + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) - t.Run("merge two data slabs", func(t *testing.T) { + t.Run("large key", func(t *testing.T) { + // Value has larger max inline size when key is more than max map key size because + // when key size exceeds max map key size, it is stored in a separate storable slab, + // and SlabIDStorable is stored as key in the map, which is 19 bytes. + SetThreshold(256) defer SetThreshold(1024) - const mapSize = 8 + mapSize := 1 + keyStringSize := maxInlineMapKeySize + 10 // key size is more than max map key size + valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size + + r := newRand(t) + + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v + } typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) - require.Nil(t, storable) + require.Nil(t, existingStorable) } - storable, err := m.Set( - compare, - hashInputProvider, - NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), - NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), - ) - require.NoError(t, err) - require.Nil(t, 
storable) + // Key is stored in separate storable slabs, while value is stored in map slab. + require.Equal(t, 2, len(storage.deltas)) - require.Equal(t, uint64(mapSize+1), m.Count()) - require.Equal(t, typeInfo, m.Type()) + testMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} - iter, err := m.Iterator() - require.NoError(t, err) +func TestMapID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - var sortedKeys []Value - keyValues := make(map[Value]Value) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + sid := m.SlabID() + id := m.ValueID() - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() +func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { + const ( + mapSize = 3 + keyStringSize = 16 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + keyValues := make(map[Value]*testMutableValue, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := newTestMutableValue(initialStorableSize) + keyValues[k] = v + } - return k, v, err - }) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) + for k, v := range keyValues { + 
existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - t.Run("random", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + require.True(t, m.root.IsData()) - const mapSize = 4096 + expectedElementSize := singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + initialStorableSize + expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize + require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - r := newRand(t) + err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true) + require.NoError(t, err) - typeInfo := testTypeInfo{42} + // Reset mutable values after changing its storable size + for k, v := range keyValues { + v.updateStorableSize(mutatedStorableSize) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.NotNil(t, existingStorable) + } - for m.Count() < mapSize { - k := randomValue(r, int(maxInlineMapElementSize)) - v := randomValue(r, int(maxInlineMapElementSize)) + require.True(t, m.root.IsData()) - _, err = m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - } + expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize + expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize + require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - require.Equal(t, uint64(mapSize), m.Count()) + err = VerifyMap(m, address, typeInfo, typeInfoComparator, hashInputProvider, true) + require.NoError(t, err) +} - iter, err := m.Iterator() - require.NoError(t, err) +func TestChildMapInlinabilityInParentMap(t *testing.T) { - storage := newTestPersistentStorage(t) - address := 
Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() + SetThreshold(256) + defer SetThreshold(1024) - var sortedKeys []Value - keyValues := make(map[Value]Value, mapSize) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + t.Run("parent is root data slab, with one child map", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 + ) - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - return k, v, err - }) + r := newRand(t) - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - t.Run("collision", func(t *testing.T) { + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- const mapSize = 1024 + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - SetThreshold(512) - defer SetThreshold(1024) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() - MaxCollisionLimitPerDigest = mapSize / 2 + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. + for i := 0; i < 3; i++ { + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - typeInfo := testTypeInfo{42} + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - digesterBuilder := &mockDigesterBuilder{} + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - digesterBuilder, - typeInfo, - ) - require.NoError(t, err) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(i+1), childMap.Count()) - for i := uint64(0); i < mapSize; i++ { + expectedChildMapValues[k] = v - k, v := Uint64Value(i), Uint64Value(i*10) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - digests := make([]Digest, 2) - if i%2 == 0 { - digests[0] = 0 - } else { - digests[0] = Digest(i % (mapSize / 2)) - } - digests[1] = Digest(i) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + 
expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + // Test parent slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedParentElementSize*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - storable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } - require.Equal(t, uint64(mapSize), m.Count()) + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + i := 0 + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - iter, err := m.Iterator() - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - var sortedKeys []Value - keyValues := make(map[Value]Value) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - i := 0 - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + expectedChildMapValues[k] = v - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + require.False(t, childMap.Inlined()) + require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. 
- i++ - return k, v, err - }) + i++ - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) - t.Run("data slab too large", func(t *testing.T) { - // Slab size must not exceed maxThreshold. - // We cannot make this problem happen after Atree Issue #193 - // was fixed by PR #194 & PR #197. This test is to catch regressions. + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(expectedSlabID).ByteSize() + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedParentElementSize*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - SetThreshold(256) - defer SetThreshold(1024) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - r := newRand(t) + // Remove elements from child map which triggers standalone map slab becomes inlined slab again. 
+ for childKey, child := range children { + childMap := child.m + valueID := child.valueID - maxStringSize := int(maxInlineMapKeySize - 2) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - typeInfo := testTypeInfo{42} + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } - digesterBuilder := &mockDigesterBuilder{} + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - digesterBuilder, - typeInfo, - ) - require.NoError(t, err) + delete(expectedChildMapValues, k) - k := NewStringValue(randStr(r, maxStringSize)) - v := NewStringValue(randStr(r, maxStringSize)) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}}) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - storable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - k = NewStringValue(randStr(r, maxStringSize)) - v = NewStringValue(randStr(r, maxStringSize)) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}}) + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize + 
expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedParentElementSize*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - storable, err = m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } - k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj") - v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ") - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}}) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. + }) - storable, err = m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + t.Run("parent is root data slab, with two child maps", func(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 9 + valueStringSize = 4 + ) - iter, err := m.Iterator() - require.NoError(t, err) + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - var sortedKeys []Value - keyValues := make(map[Value]Value) + r := newRand(t) + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - return k, v, err - }) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + + expectedParentSize := parentMap.root.ByteSize() - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. 
+ for i := 0; i < 3; i++ { + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) -} + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) -func TestMapNestedStorables(t *testing.T) { + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - t.Run("SomeValue", func(t *testing.T) { + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(i+1), childMap.Count()) - const mapSize = 4096 + expectedChildMapValues[k] = v - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + // Test parent slab size + expectedParentSize += expectedChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - ks := strings.Repeat("a", int(i)) - k := SomeValue{Value: NewStringValue(ks)} + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } - vs := strings.Repeat("b", int(i)) - v := SomeValue{Value: 
NewStringValue(vs)} + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + i := 0 + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - keyValues[k] = v + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) - }) + expectedChildMapValues[k] = v - t.Run("Array", func(t *testing.T) { + require.False(t, childMap.Inlined()) + require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. - const mapSize = 4096 + i++ - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + // Subtract inlined child map size from expected parent size + expectedParentSize -= uint32(inlinedMapDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()-1) + // Add slab id storable size to expected parent size + expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // Create a nested array with one element - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - vs := strings.Repeat("b", int(i)) - v := SomeValue{Value: NewStringValue(vs)} + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. - err = array.Append(v) - require.NoError(t, err) + // Remove one element from each child map which triggers standalone map slab becomes inlined slab again. 
+ i = 0 + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - // Insert nested array into map - ks := strings.Repeat("a", int(i)) - k := SomeValue{Value: NewStringValue(ks)} + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - keyValues[k] = array + var aKey Value + for k := range expectedChildMapValues { + aKey = k + break + } - existingStorable, err := m.Set(compare, hashInputProvider, k, array) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Nil(t, existingStorable) - } + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) - }) -} + delete(expectedChildMapValues, aKey) -func TestMapMaxInlineElement(t *testing.T) { - t.Parallel() + require.Equal(t, 1+mapSize-1-i, getStoredDeltas(storage)) - r := newRand(t) - maxStringSize := int(maxInlineMapKeySize - 2) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + i++ - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - keyValues := make(map[Value]Value) - for len(keyValues) < 2 { - // String length is maxInlineMapKeySize - 2 to account for string encoding overhead. 
- k := NewStringValue(randStr(r, maxStringSize)) - v := NewStringValue(randStr(r, maxStringSize)) - keyValues[k] = v + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - _, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - } + // Subtract slab id storable size from expected parent size + expectedParentSize -= SlabIDStorable(SlabID{}).ByteSize() + // Add expected inlined child map to expected parent size + expectedParentSize += expectedInlinedMapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - require.True(t, m.root.IsData()) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // Size of root data slab with two elements (key+value pairs) of - // max inlined size is target slab size minus - // slab id size (next slab id is omitted in root slab) - require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) + // Remove remaining elements from each inlined child map. 
+ for childKey, child := range children { + childMap := child.m + valueID := child.valueID - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) -} + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) -func TestMapString(t *testing.T) { + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } - SetThreshold(256) - defer SetThreshold(1024) + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - t.Run("small", func(t *testing.T) { - const mapSize = 3 + delete(expectedChildMapValues, k) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.Equal(t, 1, getStoredDeltas(storage)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + expectedParentSize -= expectedChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + 
testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } - want := `[0:0 1:1 2:2]` - require.Equal(t, want, m.String()) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. }) - t.Run("large", func(t *testing.T) { - const mapSize = 30 + t.Run("parent is root metadata slab, with four child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 9 + valueStringSize = 4 + ) + + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + + r := newRand(t) - digesterBuilder := &mockDigesterBuilder{} typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` - require.Equal(t, want, m.String()) - }) -} + children := getInlinedChildMapsFromParentMap(t, address, parentMap) -func TestMapSlabDump(t *testing.T) { + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. + for i := 0; i < 3; i++ { + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - SetThreshold(256) - defer SetThreshold(1024) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - t.Run("small", func(t *testing.T) { - const mapSize = 3 + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(i+1), childMap.Count()) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedChildMapValues[k] = v - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - 
require.Nil(t, existingStorable) - } + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("large", func(t *testing.T) { - const mapSize = 30 + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentMap.root.IsData()) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - } - want := []string{ - "level 1, MapMetaDataSlab id:0x102030405060708.1 
size:48 firstKey:0 children: [{id:0x102030405060708.2 size:221 firstKey:0} {id:0x102030405060708.3 size:293 firstKey:13}]", - "level 2, MapDataSlab id:0x102030405060708.2 size:221 firstkey:0 elements: [0:0:0 1:1:1 2:2:2 3:3:3 4:4:4 5:5:5 6:6:6 7:7:7 8:8:8 9:9:9 10:10:10 11:11:11 12:12:12]", - "level 2, MapDataSlab id:0x102030405060708.3 size:293 firstkey:13 elements: [13:13:13 14:14:14 15:15:15 16:16:16 17:17:17 18:18:18 19:19:19 20:20:20 21:21:21 22:22:22 23:23:23 24:24:24 25:25:25 26:26:26 27:27:27 28:28:28 29:29:29]", + expectedChildMapValues[k] = v + + require.False(t, childMap.Inlined()) + + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged + + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("inline collision", func(t *testing.T) { - const mapSize = 30 + // Parent map has one root data slab. + // Each child maps has one root data slab. + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. + require.True(t, parentMap.root.IsData()) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Remove one element from each child map which triggers standalone map slab becomes inlined slab again. 
+ for childKey, child := range children { + childMap := child.m + valueID := child.valueID - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + var aKey Value + for k := range expectedChildMapValues { + aKey = k + break + } - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) require.NoError(t, err) - require.Nil(t, existingStorable) - } + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - want := []string{ - "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", - "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: [0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", - "level 2, MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + delete(expectedChildMapValues, aKey) + + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, 
childMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("external collision", func(t *testing.T) { - const mapSize = 30 + // Parent map has one metadata slab + 2 data slabs. + require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slab because child map is inlined again. + require.False(t, parentMap.root.IsData()) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Remove remaining elements from each inlined child map. + for childKey, child := range children { + childMap := child.m + valueID := child.valueID - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}}) + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) + } - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) + + delete(expectedChildMapValues, k) + + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := 
inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]", - "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]", - "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]", + require.Equal(t, uint64(mapSize), parentMap.Count()) + for _, child := range children { + require.Equal(t, uint64(0), child.m.Count()) } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("key overflow", func(t *testing.T) { + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(mapSize) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + }) +} - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) +func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { - k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapKeySize))) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + SetThreshold(256) + defer SetThreshold(1024) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers child map slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 + ) - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]", - "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - t.Run("value overflow", func(t *testing.T) { + r := newRand(t) - digesterBuilder := &mockDigesterBuilder{} typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + // Create a parent map, with an inlined child map, with an inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: [0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", - "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) -} + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) -func TestMaxCollisionLimitPerDigest(t *testing.T) { - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - t.Run("collision limit 0", func(t *testing.T) { - const mapSize = 1024 + expectedParentSize := parentMap.root.ByteSize() - SetThreshold(256) - defer SetThreshold(1024) + // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. + for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - // Set noncryptographic hash collision limit as 0, - // meaning no collision is allowed at first level. 
- MaxCollisionLimitPerDigest = uint32(0) + childMap := child.m + cValueID := child.valueID - digesterBuilder := &mockDigesterBuilder{} - keyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - keyValues[k] = v + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - digests := []Digest{Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - } + gchildMap := gchild.m + gValueID := gchild.valueID - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - // Insert elements within collision limits - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + expectedGChildMapValues[k] = v - // Insert elements exceeding collision limits - collisionKeyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(mapSize + i) - v := Uint64Value(mapSize + i) - collisionKeyValues[k] = v + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - digests := []Digest{Digest(i)} - digesterBuilder.On("Digest", 
k).Return(mockDigester{digests}) - } + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - for k, v := range collisionKeyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var collisionLimitError *CollisionLimitError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, &collisionLimitError) - require.ErrorAs(t, fatalError, &collisionLimitError) - require.Nil(t, existingStorable) - } + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - // Verify that no new elements exceeding collision limit inserted - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - // Update elements within collision limits - for k := range keyValues { - v := Uint64Value(0) - keyValues[k] = v - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.NotNil(t, existingStorable) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent slab size + expectedParentSize += 
expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) - }) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - t.Run("collision limit > 0", func(t *testing.T) { - const mapSize = 1024 + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - SetThreshold(256) - defer SetThreshold(1024) + // Add one more element to grand child map which triggers inlined child map slab (NOT grand child map slab) becomes standalone slab + for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - // Set noncryptographic hash collision limit as 7, - // meaning at most 8 elements in collision group per digest at first level. - MaxCollisionLimitPerDigest = uint32(7) + childMap := child.m + cValueID := child.valueID - digesterBuilder := &mockDigesterBuilder{} - keyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - keyValues[k] = v + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - digests := []Digest{Digest(i % 128)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - } + gchildMap := gchild.m + gValueID := gchild.valueID - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - // Insert elements within collision limits - for k, v := range 
keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + expectedGChildMapValues[k] = v - // Insert elements exceeding collision limits - collisionKeyValues := make(map[Value]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(mapSize + i) - v := Uint64Value(mapSize + i) - collisionKeyValues[k] = v + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - digests := []Digest{Digest(i % 128)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - } + // Child map is NOT inlined + require.False(t, childMap.Inlined()) + require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is valid for not inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - for k, v := range collisionKeyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var collisionLimitError *CollisionLimitError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, &collisionLimitError) - require.ErrorAs(t, fatalError, &collisionLimitError) - require.Nil(t, existingStorable) + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + 
hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent slab size + expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(SlabID{}).ByteSize() + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - // Verify that no new elements exceeding collision limit inserted - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 2, getStoredDeltas(storage)) // There is 2 stored slab because child map is not inlined. - // Update elements within collision limits - for k := range keyValues { - v := Uint64Value(0) - keyValues[k] = v - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.NotNil(t, existingStorable) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove elements from grand child map which triggers standalone child map slab becomes inlined slab again. 
+ for childKey, child := range children { + childMap := child.m + cValueID := child.valueID + + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + + gchildMap := gchild.m + gValueID := gchild.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + + gchildKeys := make([]Value, 0, len(expectedGChildMapValues)) + for k := range expectedGChildMapValues { + gchildKeys = append(gchildKeys, k) + } + + for _, k := range gchildKeys { + existingMapKey, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKey) + require.NotNil(t, existingMapValueStorable) + + delete(expectedGChildMapValues, k) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, 
childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + + require.Equal(t, uint64(0), gchildMap.Count()) + require.Equal(t, uint64(1), childMap.Count()) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + require.Equal(t, uint64(1), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) -} -func TestMapLoadedValueIterator(t *testing.T) { + t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers grand child array slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 + largeValueStringSize = 40 + ) - SetThreshold(256) - defer SetThreshold(1024) + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + encodedLargeValueSize := NewStringValue(strings.Repeat("a", largeValueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + r := newRand(t) - t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := &mockDigesterBuilder{} + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Create a parent map, with an inlined child map, with an inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- verifyMapLoadedElements(t, m, nil) - }) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - t.Run("root data slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - const mapSize = 3 - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + expectedParentSize := parentMap.root.ByteSize() - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. + for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - verifyMapLoadedElements(t, m, values) - }) + childMap := child.m + cValueID := child.valueID - t.Run("root data slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + gchildMap := gchild.m + gValueID := gchild.valueID - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - verifyMapLoadedElements(t, m, values) - }) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - t.Run("root data slab with composite values in collision group", func(t *testing.T) { - storage := 
newTestPersistentStorage(t) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - // Create parent map with 3 collision groups, 2 elements in each group. - const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + expectedGChildMapValues[k] = v + + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) - }) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + 
hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - t.Run("root data slab with composite values in external collision group", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // Create parent map with 3 external collision group, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- verifyMapLoadedElements(t, m, values) - }) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + gchildLargeElementKeys := make(map[Value]Value) // key: child map key, value: gchild map key + // Add one large element to grand child map which triggers inlined grand child map slab (NOT child map slab) becomes standalone slab + for childKey, child := range children { + require.Equal(t, 1, len(child.children)) - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + childMap := child.m + cValueID := child.valueID - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - verifyMapLoadedElements(t, m, values) + gchildMap := gchild.m + gValueID := gchild.valueID - // Unload composite element from front to back. 
- for i := 0; i < len(values); i++ { - v := values[i][1] + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - nestedArray, ok := v.(*Array) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, largeValueStringSize)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + expectedGChildMapValues[k] = v + + gchildLargeElementKeys[childKey] = k + + // Grand child map is NOT inlined + require.False(t, gchildMap.Inlined()) + require.Equal(t, valueIDToSlabID(gValueID), gchildMap.SlabID()) // Slab ID is valid for not inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) + + // Test standalone grand child slab size + expectedGrandChildElement1Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildElement2Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedLargeValueSize + expectedGrandChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElement1Size + expectedGrandChildElement2Size + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize + expectedChildMapSize := 
inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + expectedChildElementSize + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent slab size + expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 2, getStoredDeltas(storage)) // There is 2 stored slab because child map is not inlined. - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Remove elements from grand child map which triggers standalone child map slab becomes inlined slab again. + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - verifyMapLoadedElements(t, m, values) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - // Unload external key from front to back. - for i := 0; i < len(values); i++ { - k := values[i][0] + gchildMap := gchild.m + gValueID := gchild.valueID - s, ok := k.(StringValue) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) require.True(t, ok) - // Find storage id for StringValue s. 
- var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) + + // Get all grand child map keys with large element key first + keys := make([]Value, 0, len(expectedGChildMapValues)) + keys = append(keys, gchildLargeElementKeys[childKey]) + for k := range expectedGChildMapValues { + if k != gchildLargeElementKeys[childKey] { + keys = append(keys, k) } } - require.NoError(t, keyID.Valid()) + // Remove all elements (large element first) to trigger grand child map being inlined again. + for _, k := range keys { - err := storage.Remove(keyID) - require.NoError(t, err) + existingMapKeyStorable, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) + + delete(expectedGChildMapValues, k) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + 
expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + require.Equal(t, uint64(0), gchildMap.Count()) + require.Equal(t, uint64(1), childMap.Count()) } + + require.Equal(t, uint64(1), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) - t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) { + t.Run("parent is root data slab, two child map, one grand child map each, changes to child map triggers child map slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 4 + valueStringSize = 4 + ) + + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() + + r := newRand(t) + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Create parent map with 3 collision groups, 2 elements in each group. - const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Create a parent map, with inlined child map, containing inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - verifyMapLoadedElements(t, m, values) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // Unload composite element from front to back. - for i := 0; i < len(values); i++ { - v := values[i][1] + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) + + expectedParentSize := parentMap.root.ByteSize() + + // Insert 1 elements to grand child map (both child map and grand child map are still inlined). 
+ for childKey, child := range children { + childMap := child.m + cValueID := child.valueID + + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } - nestedArray, ok := v.(*Array) + gchildMap := gchild.m + gValueID := gchild.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + expectedGChildMapValues[k] = v + + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + 
encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + expectedParentSize = parentMap.root.ByteSize() - verifyMapLoadedElements(t, m, values) + // Add 1 element to each child map so child map reaches its max size + for childKey, child := range children { - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i][1] + childMap := child.m + cValueID := child.valueID - nestedArray, ok := v.(*Array) + var gchild *mapInfo + for _, gv := range child.children { + gchild = gv + break + } + + gchildMap := gchild.m + gValueID := gchild.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) 
require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + expectedChildMapValues[k] = v - t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyMapLoadedElements(t, m, values) + // Test inlined grand child slab size + expectedGrandChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + 
expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - // Unload external collision group slab from front to back + // Test inlined child slab size + expectedChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } - } + // Test parent slab size + expectedParentSize += digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - require.Equal(t, 3, len(externalCollisionSlabIDs)) - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is 1 stored slab because child map is inlined. 
+ + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Add 1 more element to each child map so child map reaches its max size + i := 0 + for childKey, child := range children { + + childMap := child.m + cValueID := child.valueID + + var gchild *mapInfo + for _, gv := range child.children { + gchild = gv + break } - return a.AddressAsUint64() < b.AddressAsUint64() - }) - for i, id := range externalCollisionSlabIDs { - err := storage.Remove(id) + gchildMap := gchild.m + gValueID := gchild.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) + + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - expectedValues := values[i*4+4:] - verifyMapLoadedElements(t, m, expectedValues) + expectedChildMapValues[k] = v + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is NOT inlined + require.False(t, childMap.Inlined()) + require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is the same as value ID for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, (1 + i + 1), getStoredDeltas(storage)) + + i++ + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + 
// Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*2 + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There is 1+mapSize stored slab because all child maps are standalone. - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test parent slab size + expectedParentSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + (singleElementPrefixSize+digestSize+encodedKeySize+slabIDStorableSize)*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - verifyMapLoadedElements(t, m, values) + expectedParentMapSize := parentMap.root.ByteSize() - // Unload composite element from back to front. - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + // Remove one element from child map which triggers standalone child map slab becomes inlined slab again. 
+ for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - nestedArray, ok := v.(*Array) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m + gValueID := gchild.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + var aKey Value + for k := range expectedChildMapValues { + if k != gchildKey { + aKey = k + break + } + } - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + // Remove one element + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, aKey) + require.NoError(t, err) + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) + + delete(expectedChildMapValues, aKey) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := 
inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentMapSize = expectedParentMapSize - slabIDStorableSize + expectedChildMapSize + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) - - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - verifyMapLoadedElements(t, m, values) + // remove remaining elements from child map, except for grand child map + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - // Unload composite element from front to back. - for i := len(values) - 1; i >= 0; i-- { - k := values[i][0] + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m + gValueID := gchild.valueID - s, ok := k.(StringValue) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) require.True(t, ok) - // Find storage id for StringValue s. 
- var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } + keys := make([]Value, 0, len(expectedChildMapValues)-1) + for k := range expectedChildMapValues { + if k != gchildKey { + keys = append(keys, k) } } - require.NoError(t, keyID.Valid()) - - err := storage.Remove(keyID) - require.NoError(t, err) + // Remove all elements, except grand child map + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) + + delete(expectedChildMapValues, k) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + 
require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentMapSize -= digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + require.Equal(t, uint64(1), gchildMap.Count()) + require.Equal(t, uint64(1), childMap.Count()) } - }) - t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - // Create parent map with 3 collision groups, 2 elements in each group. - const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) + + t.Run("parent is root metadata slab, with four child maps, each child map has grand child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 4 + valueStringSize = 8 ) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() - verifyMapLoadedElements(t, m, values) + r := newRand(t) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - nestedArray, ok := v.(*Array) - require.True(t, ok) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Create a parent map, with inlined child map, containing inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - t.Run("root data slab with composite values in external collision group, unload value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // Create parent map with 3 external collision groups, 4 elements in the group. 
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Insert 1 element to grand child map + // Both child map and grand child map are still inlined, but parent map's root slab is metadata slab. + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - verifyMapLoadedElements(t, m, values) + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m + gValueID := gchild.valueID - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - nestedArray, ok := v.(*Array) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + expectedGChildMapValues[k] = v - t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, 
SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - // Unload external slabs from back to front - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } - } + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, 
true) } - require.Equal(t, 3, len(externalCollisionSlabIDs)) - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() + require.False(t, parentMap.Inlined()) + require.False(t, parentMap.root.IsData()) + // There is 3 stored slab: parent metadata slab with 2 data slabs (all child and grand child maps are inlined) + require.Equal(t, 3, getStoredDeltas(storage)) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Insert 1 element to grand child map + // - grand child maps are inlined + // - child maps are standalone + // - parent map's root slab is data slab. + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID + + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break } - return a.AddressAsUint64() < b.AddressAsUint64() - }) + gchildMap := gchild.m + gValueID := gchild.valueID - for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- { - err := storage.Remove(externalCollisionSlabIDs[i]) - require.NoError(t, err) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - expectedValues := values[:i*4] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + 
require.NoError(t, err) + require.Nil(t, existingStorable) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + expectedGChildMapValues[k] = v - verifyMapLoadedElements(t, m, values) + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - // Unload value in the middle - unloadValueIndex := 1 + // Child map is NOT inlined + require.False(t, childMap.Inlined()) + require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is same as value ID + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - v := values[unloadValueIndex][1] + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + require.False(t, parentMap.Inlined()) + require.True(t, 
parentMap.root.IsData()) + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) + + // Test parent slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove one element from grand child map to trigger child map inlined again. + // - grand child maps are inlined + // - child maps are inlined + // - parent map root slab is metadata slab + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID + + var gchildKey Value + var gchild *mapInfo + for gk, gv := range child.children { + gchildKey = gk + gchild = gv + break + } + gchildMap := gchild.m + gValueID := gchild.valueID - verifyMapLoadedElements(t, m, values) - }) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedGChildMapValues, ok := expectedChildMapValues[gchildKey].(mapValue) + require.True(t, ok) - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + var aKey Value + for k := range expectedGChildMapValues { + aKey = k + break + } - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Remove one element from grand child map + existingMapKeyStorable, existingMapValueStorable, err := gchildMap.Remove(compare, hashInputProvider, aKey) + require.NoError(t, err) + require.Equal(t, aKey, existingMapKeyStorable) + require.NotNil(t, 
existingMapValueStorable) + + delete(expectedGChildMapValues, aKey) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - verifyMapLoadedElements(t, m, values) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.False(t, parentMap.root.IsData()) + require.Equal(t, 3, getStoredDeltas(storage)) - // Unload key in the middle. 
- unloadValueIndex := 1 + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - k := values[unloadValueIndex][0] + // Remove all grand child element to trigger + // - child maps are inlined + // - parent map root slab is data slab + for childKey, child := range children { + childMap := child.m + cValueID := child.valueID - s, ok := k.(StringValue) - require.True(t, ok) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - // Find storage id for StringValue s. - var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } + keys := make([]Value, 0, len(expectedChildMapValues)) + for k := range expectedChildMapValues { + keys = append(keys, k) } - } - require.NoError(t, keyID.Valid()) + // Remove grand children + for _, k := range keys { + existingMapKeyStorable, existingMapValueStorable, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - err := storage.Remove(keyID) - require.NoError(t, err) + // Grand child map is returned as SlabIDStorable, even if it was stored inlined in the parent. 
+ id, ok := existingMapValueStorable.(SlabIDStorable) + require.True(t, ok) - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + v, err := id.StoredValue(storage) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) - }) + gchildMap, ok := v.(*OrderedMap) + require.True(t, ok) - t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedGChildMapValues, ok := expectedChildMapValues[k].(mapValue) + require.True(t, ok) - // Create parent map with 3 collision groups, 2 elements in each group. - const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + valueEqual(t, expectedGChildMapValues, gchildMap) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + delete(expectedChildMapValues, k) - // Unload composite element in the middle - for _, unloadValueIndex := range []int{1, 3, 5} { - v := values[unloadValueIndex][1] + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - expectedValues := [][2]Value{ - values[0], - values[2], - 
values[4], + require.Equal(t, uint64(0), childMap.Count()) } - verifyMapLoadedElements(t, m, expectedValues) + + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + expectedParentMapSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + (digestSize+singleElementPrefixSize+encodedKeySize+expectedChildMapSize)*uint32(mapSize) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) }) +} - t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func TestChildMapWhenParentMapIsModified(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 4 + valueStringSize = 4 + expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 + ) - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() - // parent map: 1 root data slab, 3 external collision group - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + r := newRand(t) - verifyMapLoadedElements(t, m, values) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Unload composite value in the middle. 
- for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { - v := values[unloadValueIndex][1] + parentMapDigesterBuilder := &mockDigesterBuilder{} + parentDigest := 1 - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Create parent map with mock digests + parentMap, err := NewMap(storage, address, parentMapDigesterBuilder, typeInfo) + require.NoError(t, err) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } + expectedKeyValues := make(map[Value]Value) - expectedValues := [][2]Value{ - values[0], - values[2], - values[4], - values[6], - values[8], - values[10], + // Insert 2 child map with digest values of 1 and 3. + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + k := NewStringValue(randStr(r, keyStringSize)) + + digests := []Digest{ + Digest(parentDigest), } - verifyMapLoadedElements(t, m, expectedValues) - }) + parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests}) + parentDigest += 2 - t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Insert child map to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Create parent map with 3 external collision groups, 4 elements in the group. 
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + expectedKeyValues[k] = mapValue{} - // parent map: 1 root data slab, 3 external collision group - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) + + // Test child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + } - // Unload external slabs in the middle. - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } - } - } - require.Equal(t, 3, len(externalCollisionSlabIDs)) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
- sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() - } - return a.AddressAsUint64() < b.AddressAsUint64() - }) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - id := externalCollisionSlabIDs[1] - err := storage.Remove(id) - require.NoError(t, err) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - copy(values[4:], values[8:]) - values = values[:8] + var keysForNonChildMaps []Value - verifyMapLoadedElements(t, m, values) - }) + t.Run("insert elements in parent map", func(t *testing.T) { - t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { - storage := newTestPersistentStorage(t) + newDigests := []Digest{ + 0, // insert value at digest 0, so all child map physical positions are moved by +1 + 2, // insert value at digest 2, so second child map physical positions are moved by +1 + 4, // insert value at digest 4, so no child map physical positions are moved + } - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + for _, digest := range newDigests { - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - verifyMapLoadedElements(t, m, values) + digests := []Digest{digest} + parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests}) - i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { - // At this point, iterator returned first element (v). 
+ existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Remove all other nested composite elements (except first element) from storage. - for _, element := range values[1:] { - value := element[1] - nestedArray, ok := value.(*Array) + expectedKeyValues[k] = v + keysForNonChildMaps = append(keysForNonChildMaps, k) + + i := 0 + for childKey, child := range children { + childMap := child.m + childValueID := child.valueID + + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } + k := NewStringValue(randStr(r, keyStringSize)) + v := Uint64Value(i) - require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0][0], k) - valueEqual(t, typeInfoComparator, values[0][1], v) - i++ - return true, nil - }) + i++ - require.NoError(t, err) - require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. 
- }) + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) { - const mapSize = 3 + expectedChildMapValues[k] = v - // Create a map with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged - m, values := createMapWithSimpleAndCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - nestedCompositeIndex, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize() + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - // parent map: 1 root data slab - // composite element: 1 root data slab - require.Equal(t, 2, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } - verifyMapLoadedElements(t, m, values) + t.Run("remove elements from parent map", func(t *testing.T) { + // Remove element at digest 0, so all child map physical position are moved by -1. 
+ // Remove element at digest 2, so only second child map physical position is moved by -1 + // Remove element at digest 4, so no child map physical position is moved by -1 - // Unload composite value - v := values[nestedCompositeIndex][1].(*Array) + for _, k := range keysForNonChildMaps { - err := storage.Remove(v.SlabID()) - require.NoError(t, err) + existingMapKeyStorable, existingMapValueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.NotNil(t, existingMapKeyStorable) + require.NotNil(t, existingMapValueStorable) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + delete(expectedKeyValues, k) - verifyMapLoadedElements(t, m, values) - } - }) + i := 0 + for childKey, child := range children { + childMap := child.m + childValueID := child.valueID - t.Run("root metadata slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedChildMapValues, ok := expectedKeyValues[childKey].(mapValue) + require.True(t, ok) - const mapSize = 20 - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + k := NewStringValue(randStr(r, keyStringSize)) + v := Uint64Value(i) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + i++ - verifyMapLoadedElements(t, m, values) - }) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - t.Run("root metadata slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedChildMapValues[k] = v - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + require.True(t, 
childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize() + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } + }) }) +} - t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func createMapWithEmptyChildMap( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + mapSize int, + getKey func() Value, +) (*OrderedMap, map[Value]Value) { - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values : 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + expectedKeyValues := make(map[Value]Value) - // Unload composite element from front to back - for i := 0; 
i < len(values); i++ { - v := values[i][1] + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - nestedArray, ok := v.(*Array) - require.True(t, ok) + k := getKey() - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + ks, err := k.Storable(storage, address, maxInlineMapElementSize) + require.NoError(t, err) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + // Insert child map to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedKeyValues[k] = mapValue{} - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + } - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := 
values[i][1] + return parentMap, expectedKeyValues +} - nestedArray, ok := v.(*Array) - require.True(t, ok) +func createMapWithEmpty2LevelChildMap( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + mapSize int, + getKey func() Value, +) (*OrderedMap, map[Value]Value) { - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedKeyValues := make(map[Value]Value) - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Create grand child map + gchildMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + k := getKey() - // Unload composite element in the middle - for _, index := range []int{4, 14} { + ks, err := k.Storable(storage, address, maxInlineMapElementSize) + require.NoError(t, err) - v := values[index][1] + // Insert grand child map to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, 
existingStorable) - nestedArray, ok := v.(*Array) - require.True(t, ok) + require.True(t, gchildMap.Inlined()) + testInlinedMapIDs(t, address, gchildMap) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - copy(values[index:], values[index+1:]) - values = values[:len(values)-1] + expectedKeyValues[k] = mapValue{k: mapValue{}} - verifyMapLoadedElements(t, m, values) - } - }) + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) - t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { - const mapSize = 20 + // Test grand child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, gchildMap.root.ByteSize()) - // Create a map with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + // Test child map slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - m, values := createMapWithSimpleAndCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - nestedCompositeIndex, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedChildMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + } - 
// parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 5, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + testNotInlinedMapIDs(t, address, parentMap) - verifyMapLoadedElements(t, m, values) + return parentMap, expectedKeyValues +} - v := values[nestedCompositeIndex][1].(*Array) +type mapInfo struct { + m *OrderedMap + valueID ValueID + children map[Value]*mapInfo +} - err := storage.Remove(v.SlabID()) - require.NoError(t, err) +func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap *OrderedMap) map[Value]*mapInfo { - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + children := make(map[Value]*mapInfo) - verifyMapLoadedElements(t, m, values) + err := parentMap.IterateReadOnlyKeys(func(k Value) (bool, error) { + if k == nil { + return false, nil } - }) - t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + e, err := parentMap.Get(compare, hashInputProvider, k) + require.NoError(t, err) - const mapSize = 20 + childMap, ok := e.(*OrderedMap) + if !ok { + return true, nil + } - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + if childMap.Inlined() { + testInlinedMapIDs(t, address, childMap) + } else { + testNotInlinedMapIDs(t, address, childMap) + } - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + children[k] = &mapInfo{ + m: childMap, + valueID: childMap.ValueID(), + children: getInlinedChildMapsFromParentMap(t, address, childMap), + } + + return true, nil + }) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + return children +} - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - 
require.True(t, ok) +func TestMapSetReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Unload data slabs from front to back - for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + t.Run("child array is not inlined", func(t *testing.T) { + const mapSize = 2 - childHeader := rootMetaDataSlab.childrenHeaders[i] + storage := newTestPersistentStorage(t) - // Get data slab element count before unload it from storage. - // Element count isn't in the header. - mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) - require.True(t, ok) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - count := mapDataSlab.elements.Count() + expectedKeyValues := make(map[Value]Value) - err := storage.Remove(childHeader.slabID) + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values = values[count:] - - verifyMapLoadedElements(t, m, values) - } - }) + k := Uint64Value(i) - t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) - const mapSize = 20 + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + err = childArray.Append(v) + require.NoError(t, err) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + expectedChildValues = append(expectedChildValues, v) - verifyMapLoadedElements(t, m, values) + if !childArray.Inlined() { + 
break + } + } - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + expectedKeyValues[k] = expectedChildValues + } - // Unload data slabs from back to front - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - childHeader := rootMetaDataSlab.childrenHeaders[i] + // Overwrite existing child array value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - // Get data slab element count before unload it from storage - // Element count isn't in the header. - mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) + id, ok := existingStorable.(SlabIDStorable) require.True(t, ok) - count := mapDataSlab.elements.Count() - - err := storage.Remove(childHeader.slabID) + child, err := id.StoredValue(storage) require.NoError(t, err) - values = values[:len(values)-int(count)] + valueEqual(t, expectedKeyValues[k], child) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + expectedKeyValues[k] = Uint64Value(0) } + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) - t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + t.Run("child array is inlined", func(t *testing.T) { + const mapSize = 2 - const mapSize = 20 + storage := newTestPersistentStorage(t) - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, 
getMapMetaDataSlabCount(storage)) + expectedKeyValues := make(map[Value]Value) - verifyMapLoadedElements(t, m, values) + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + k := Uint64Value(i) - require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2) + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) - index := 1 - childHeader := rootMetaDataSlab.childrenHeaders[index] + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) - // Get element count from previous data slab - mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab) - require.True(t, ok) + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) - countAtIndex0 := mapDataSlab.elements.Count() + expectedKeyValues[k] = arrayValue{v} + } - // Get element count from slab to be unloaded - mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab) - require.True(t, ok) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - countAtIndex1 := mapDataSlab.elements.Count() + // Overwrite existing child array value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) - copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) - values = values[:m.Count()-uint64(countAtIndex1)] + child, err := id.StoredValue(storage) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) - }) + valueEqual(t, 
expectedKeyValues[k], child) - t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedKeyValues[k] = Uint64Value(0) - const mapSize = 200 + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) - // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs - require.Equal(t, 4, getMapMetaDataSlabCount(storage)) + t.Run("child map is not inlined", func(t *testing.T) { + const mapSize = 2 - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + storage := newTestPersistentStorage(t) - // Unload non-root metadata slabs from front to back. - for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - childHeader := rootMetaDataSlab.childrenHeaders[i] + expectedKeyValues := make(map[Value]Value) - err := storage.Remove(childHeader.slabID) + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - // Use firstKey to deduce number of elements in slab. 
- var expectedValues [][2]Value - if i < len(rootMetaDataSlab.childrenHeaders)-1 { - nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1] - expectedValues = values[int(nextChildHeader.firstKey):] - } + k := Uint64Value(i) - verifyMapLoadedElements(t, m, expectedValues) - } - }) + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues - const mapSize = 200 + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs - require.Equal(t, 4, getMapMetaDataSlabCount(storage)) + expectedChildValues[k] = v - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + if !childMap.Inlined() { + break + } + } + } - // Unload non-root metadata slabs from back to front. 
- for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - childHeader := rootMetaDataSlab.childrenHeaders[i] + // Overwrite existing child map value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) - err := storage.Remove(childHeader.slabID) + id, ok := existingStorable.(SlabIDStorable) + require.True(t, ok) + + child, err := id.StoredValue(storage) require.NoError(t, err) - // Use firstKey to deduce number of elements in slabs. - values = values[:childHeader.firstKey] + valueEqual(t, expectedKeyValues[k], child) + + expectedKeyValues[k] = Uint64Value(0) - verifyMapLoadedElements(t, m, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) - t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) { + t.Run("child map is inlined", func(t *testing.T) { + const mapSize = 2 storage := newTestPersistentStorage(t) - const mapSize = 500 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + expectedKeyValues := make(map[Value]Value) - verifyMapLoadedElements(t, m, values) + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + 
require.NoError(t, err) - r := newRand(t) + k := Uint64Value(i) - // Unload composite element in random position - for len(values) > 0 { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - i := r.Intn(len(values)) + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues + + // Insert into child map until child map is not inlined + v := NewStringValue(strings.Repeat("a", 10)) + + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + expectedChildValues[k] = v + } - v := values[i][1] + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - nestedArray, ok := v.(*Array) + // Overwrite existing child map value + for k := range expectedKeyValues { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) + require.NoError(t, err) + require.NotNil(t, existingStorable) + + id, ok := existingStorable.(SlabIDStorable) require.True(t, ok) - err := storage.Remove(nestedArray.SlabID()) + child, err := id.StoredValue(storage) require.NoError(t, err) - copy(values[i:], values[i+1:]) - values = values[:len(values)-1] + valueEqual(t, expectedKeyValues[k], child) + + expectedKeyValues[k] = Uint64Value(0) - verifyMapLoadedElements(t, m, values) + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } + + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) +} - t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { +func TestMapRemoveReturnedValue(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + t.Run("child array is not inlined", func(t *testing.T) { + const mapSize = 2 - const mapSize = 500 - m, values := createMapWithCompositeValues( - t, - storage, - address, - 
typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + storage := newTestPersistentStorage(t) - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // composite values: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + expectedKeyValues := make(map[Value]Value) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - type slabInfo struct { - id SlabID - startIndex int - count int - } + k := Uint64Value(i) - var dataSlabInfos []*slabInfo - for _, mheader := range rootMetaDataSlab.childrenHeaders { + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) - nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) - require.True(t, ok) + var expectedChildValues arrayValue + for { + v := NewStringValue(strings.Repeat("a", 10)) - for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ { - h := nonRootMetaDataSlab.childrenHeaders[i] + err = childArray.Append(v) + require.NoError(t, err) - if len(dataSlabInfos) > 0 { - // Update previous slabInfo.count - dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex - } + expectedChildValues = append(expectedChildValues, v) - dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)}) + if !childArray.Inlined() { + break + } } - } - r := newRand(t) + expectedKeyValues[k] = expectedChildValues + } - for len(dataSlabInfos) > 0 { - index := 
r.Intn(len(dataSlabInfos)) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - slabToBeRemoved := dataSlabInfos[index] + // Remove child array value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) - // Update startIndex for all subsequence data slabs - for i := index + 1; i < len(dataSlabInfos); i++ { - dataSlabInfos[i].startIndex -= slabToBeRemoved.count - } + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - err := storage.Remove(slabToBeRemoved.id) + child, err := id.StoredValue(storage) require.NoError(t, err) - if index == len(dataSlabInfos)-1 { - values = values[:slabToBeRemoved.startIndex] - } else { - copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:]) - values = values[:len(values)-slabToBeRemoved.count] - } + valueEqual(t, expectedKeyValues[k], child) - copy(dataSlabInfos[index:], dataSlabInfos[index+1:]) - dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + err = storage.Remove(SlabID(id)) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + delete(expectedKeyValues, k) } - require.Equal(t, 0, len(values)) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) - t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + t.Run("child array is inlined", func(t *testing.T) { + const mapSize = 2 storage := newTestPersistentStorage(t) - const mapSize = 500 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // composite values: 1 
root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + expectedKeyValues := make(map[Value]Value) - verifyMapLoadedElements(t, m, values) + for i := 0; i < mapSize; i++ { + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - type slabInfo struct { - id SlabID - startIndex int - count int - children []*slabInfo - } + k := Uint64Value(i) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) - metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) - for i, mheader := range rootMetaDataSlab.childrenHeaders { + // Insert one element to child array + v := NewStringValue(strings.Repeat("a", 10)) - if i > 0 { - prevMetaDataSlabInfo := metadataSlabInfos[i-1] - prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1] + err = childArray.Append(v) + require.NoError(t, err) + require.True(t, childArray.Inlined()) - // Update previous metadata slab count - prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex + expectedKeyValues[k] = arrayValue{v} + } - // Update previous data slab count - prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex - } + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - metadataSlabInfo := &slabInfo{ - id: mheader.slabID, - startIndex: int(mheader.firstKey), - } + // Remove child array value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) - nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) + id, ok := valueStorable.(SlabIDStorable) require.True(t, ok) - children 
:= make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders)) - for i, h := range nonRootMetadataSlab.childrenHeaders { - children[i] = &slabInfo{ - id: h.slabID, - startIndex: int(h.firstKey), - } - if i > 0 { - children[i-1].count = int(h.firstKey) - children[i-1].startIndex - } - } + child, err := id.StoredValue(storage) + require.NoError(t, err) - metadataSlabInfo.children = children - metadataSlabInfos[i] = metadataSlabInfo + valueEqual(t, expectedKeyValues[k], child) + + delete(expectedKeyValues, k) + + err = storage.Remove(SlabID(id)) + require.NoError(t, err) } - const ( - metadataSlabType int = iota - dataSlabType - maxSlabType - ) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) - r := newRand(t) + t.Run("child map is not inlined", func(t *testing.T) { + const mapSize = 2 - for len(metadataSlabInfos) > 0 { + storage := newTestPersistentStorage(t) - var slabInfoToBeRemoved *slabInfo - var isLastSlab bool + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - switch r.Intn(maxSlabType) { + expectedKeyValues := make(map[Value]Value) - case metadataSlabType: + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - metadataSlabIndex := r.Intn(len(metadataSlabInfos)) + k := Uint64Value(i) - isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1 + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex] + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues - count := slabInfoToBeRemoved.count + // Insert into child map until child map is not inlined + j := 0 + for { + k := Uint64Value(j) + v := NewStringValue(strings.Repeat("a", 10)) + j++ - // Update startIndex for 
subsequence metadata slabs - for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { - metadataSlabInfos[i].startIndex -= count + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - for j := 0; j < len(metadataSlabInfos[i].children); j++ { - metadataSlabInfos[i].children[j].startIndex -= count - } + expectedChildValues[k] = v + + if !childMap.Inlined() { + break } + } + } - copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) - metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - case dataSlabType: + // Remove child map value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) - metadataSlabIndex := r.Intn(len(metadataSlabInfos)) + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - metadataSlabInfo := metadataSlabInfos[metadataSlabIndex] + child, err := id.StoredValue(storage) + require.NoError(t, err) - dataSlabIndex := r.Intn(len(metadataSlabInfo.children)) + valueEqual(t, expectedKeyValues[k], child) - isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) && - (dataSlabIndex == len(metadataSlabInfo.children)-1) + delete(expectedKeyValues, k) - slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex] + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } - count := slabInfoToBeRemoved.count + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) - // Update startIndex for all subsequence data slabs in this metadata slab info - for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ { - metadataSlabInfo.children[i].startIndex -= count - } + t.Run("child map is inlined", func(t *testing.T) { + const mapSize = 2 - 
copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:]) - metadataSlabInfo.children = metadataSlabInfo.children[:len(metadataSlabInfo.children)-1] + storage := newTestPersistentStorage(t) - metadataSlabInfo.count -= count + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Update startIndex for all subsequence metadata slabs. - for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { - metadataSlabInfos[i].startIndex -= count + expectedKeyValues := make(map[Value]Value) - for j := 0; j < len(metadataSlabInfos[i].children); j++ { - metadataSlabInfos[i].children[j].startIndex -= count - } - } + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - if len(metadataSlabInfo.children) == 0 { - copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) - metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] - } - } + k := Uint64Value(i) - err := storage.Remove(slabInfoToBeRemoved.id) + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) + require.Nil(t, existingStorable) - if isLastSlab { - values = values[:slabInfoToBeRemoved.startIndex] - } else { - copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) - values = values[:len(values)-slabInfoToBeRemoved.count] - } + expectedChildValues := make(mapValue) + expectedKeyValues[k] = expectedChildValues + + // Insert into child map until child map is not inlined + v := NewStringValue(strings.Repeat("a", 10)) + + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - verifyMapLoadedElements(t, m, values) + expectedChildValues[k] = v } - require.Equal(t, 0, len(values)) - 
}) -} + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) -func createMapWithLongStringKey( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, -) (*OrderedMap, [][2]Value) { + // Remove child map value + for k := range expectedKeyValues { + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) - digesterBuilder := &mockDigesterBuilder{} + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - // Create parent map. - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + child, err := id.StoredValue(storage) + require.NoError(t, err) - expectedValues := make([][2]Value, size) - r := 'a' - for i := 0; i < size; i++ { - s := strings.Repeat(string(r), int(maxInlineMapElementSize)) + valueEqual(t, expectedKeyValues[k], child) - k := NewStringValue(s) - v := Uint64Value(i) + delete(expectedKeyValues, k) - expectedValues[i] = [2]Value{k, v} + err = storage.Remove(SlabID(id)) + require.NoError(t, err) + } - digests := []Digest{Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) +} - existingStorable, err := m.Set(compare, hashInputProvider, k, v) +func TestMapWithOutdatedCallback(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("overwritten child array", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - require.Nil(t, existingStorable) - r++ - } + expectedKeyValues := make(mapValue) - return m, expectedValues -} + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) -func createMapWithSimpleValues( - t *testing.T, - 
storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { + k := Uint64Value(0) - digesterBuilder := &mockDigesterBuilder{} + // Insert child array to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) + require.NoError(t, err) + require.Nil(t, existingStorable) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + v := NewStringValue(strings.Repeat("a", 10)) - expectedValues := make([][2]Value, size) - r := rune('a') - for i := 0; i < size; i++ { - k := Uint64Value(i) - v := NewStringValue(strings.Repeat(string(r), 20)) + err = childArray.Append(v) + require.NoError(t, err) - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + expectedKeyValues[k] = arrayValue{v} - expectedValues[i] = [2]Value{k, v} + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1]) + // Overwrite child array value from parent + valueStorable, err := parentMap.Set(compare, hashInputProvider, k, Uint64Value(0)) require.NoError(t, err) - require.Nil(t, existingStorable) - } - return m, expectedValues -} + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) -func createMapWithCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { + child, err := id.StoredValue(storage) + require.NoError(t, err) + + valueEqual(t, expectedKeyValues[k], child) - // Use mockDigesterBuilder to guarantee element order. 
- digesterBuilder := &mockDigesterBuilder{} + expectedKeyValues[k] = Uint64Value(0) - // Create parent map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) - expectedValues := make([][2]Value, size) - for i := 0; i < size; i++ { - // Create nested array - nested, err := NewArray(storage, address, typeInfo) + // modify overwritten child array + err = childArray.Append(Uint64Value(0)) require.NoError(t, err) - err = nested.Append(Uint64Value(i)) + // childArray.parentUpdater is nil after callback is invoked. + require.Nil(t, childArray.parentUpdater) + + // No-op on parent + valueEqual(t, expectedKeyValues, parentMap) + }) + + t.Run("removed child array", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - k := Uint64Value(i) - v := nested + expectedKeyValues := make(mapValue) - expectedValues[i] = [2]Value{k, v} + // Create child array + childArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - //digests := []Digest{Digest(i)} - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + k := Uint64Value(0) - // Set nested array to parent - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // Insert child array to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childArray) require.NoError(t, err) require.Nil(t, existingStorable) - } - return m, expectedValues -} + v := NewStringValue(strings.Repeat("a", 10)) -func createMapWithSimpleAndCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - compositeValueIndex int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { + err = childArray.Append(v) + 
require.NoError(t, err) - digesterBuilder := &mockDigesterBuilder{} + expectedKeyValues[k] = arrayValue{v} - // Create parent map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + testMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - values := make([][2]Value, size) - r := 'a' - for i := 0; i < size; i++ { + // Remove child array value from parent + keyStorable, valueStorable, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, keyStorable, k) - k := Uint64Value(i) + id, ok := valueStorable.(SlabIDStorable) + require.True(t, ok) - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + child, err := id.StoredValue(storage) + require.NoError(t, err) - if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + valueEqual(t, expectedKeyValues[k], child) - err = a.Append(Uint64Value(i)) - require.NoError(t, err) + delete(expectedKeyValues, k) - values[i] = [2]Value{k, a} - } else { - values[i] = [2]Value{k, NewStringValue(strings.Repeat(string(r), 18))} - } + // childArray.parentUpdater isn't nil before callback is invoked. + require.NotNil(t, childArray.parentUpdater) - existingStorable, err := m.Set(compare, hashInputProvider, values[i][0], values[i][1]) + // modify removed child array + err = childArray.Append(Uint64Value(0)) require.NoError(t, err) - require.Nil(t, existingStorable) - } - return m, values -} + // childArray.parentUpdater is nil after callback is invoked. 
+ require.Nil(t, childArray.parentUpdater) -func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { - i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { - require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i][0], k) - valueEqual(t, typeInfoComparator, expectedValues[i][1], v) - i++ - return true, nil + // No-op on parent + valueEqual(t, expectedKeyValues, parentMap) }) - require.NoError(t, err) - require.Equal(t, len(expectedValues), i) } -func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int { - var counter int - for _, slab := range storage.deltas { - if _, ok := slab.(*MapMetaDataSlab); ok { - counter++ - } - } - return counter -} +func TestMapSetType(t *testing.T) { + typeInfo := testTypeInfo{42} + newTypeInfo := testTypeInfo{43} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} -func TestMaxInlineMapValueSize(t *testing.T) { + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) - t.Run("small key", func(t *testing.T) { - // Value has larger max inline size when key is less than max map key size. + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + require.Equal(t, typeInfo, m.Type()) + require.True(t, m.root.IsData()) - SetThreshold(256) - defer SetThreshold(1024) + seed := m.root.ExtraData().Seed - mapSize := 2 - keyStringSize := 16 // Key size is less than max map key size. - valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size. 
+ err = m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, seed, m.root.ExtraData().Seed) - r := newRand(t) + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, keyStringSize)) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v - } + testExistingMapSetType(t, m.SlabID(), storage.baseStorage, newTypeInfo, m.Count(), seed) + }) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + t.Run("data slab root", func(t *testing.T) { storage := newTestPersistentStorage(t) m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + mapSize := 10 + for i := 0; i < mapSize; i++ { + v := Uint64Value(i) + existingStorable, err := m.Set(compare, hashInputProvider, v, v) require.NoError(t, err) require.Nil(t, existingStorable) } - // Both key and value are stored in map slab. - require.Equal(t, 1, len(storage.deltas)) - - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) - }) - - t.Run("max size key", func(t *testing.T) { - // Value max size is about half of max map element size when key is exactly max map key size. + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, typeInfo, m.Type()) + require.True(t, m.root.IsData()) - SetThreshold(256) - defer SetThreshold(1024) + seed := m.root.ExtraData().Seed - mapSize := 1 - keyStringSize := maxInlineMapKeySize - 2 // Key size is exactly max map key size (2 bytes is string encoding overhead). - valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half). 
+ err = m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, seed, m.root.ExtraData().Seed) - r := newRand(t) + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, int(keyStringSize))) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v - } + testExistingMapSetType(t, m.SlabID(), storage.baseStorage, newTypeInfo, m.Count(), seed) + }) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + t.Run("metadata slab root", func(t *testing.T) { storage := newTestPersistentStorage(t) m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + mapSize := 10_000 + for i := 0; i < mapSize; i++ { + v := Uint64Value(i) + existingStorable, err := m.Set(compare, hashInputProvider, v, v) require.NoError(t, err) require.Nil(t, existingStorable) } - // Key is stored in map slab, while value is stored separately in storable slab. 
- require.Equal(t, 2, len(storage.deltas)) + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, typeInfo, m.Type()) + require.False(t, m.root.IsData()) + + seed := m.root.ExtraData().Seed + + err = m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, seed, m.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testExistingMapSetType(t, m.SlabID(), storage.baseStorage, newTypeInfo, m.Count(), seed) }) - t.Run("large key", func(t *testing.T) { - // Value has larger max inline size when key is more than max map key size because - // when key size exceeds max map key size, it is stored in a separate storable slab, - // and SlabIDStorable is stored as key in the map, which is 19 bytes. + t.Run("inlined in parent container root data slab", func(t *testing.T) { + storage := newTestPersistentStorage(t) - SetThreshold(256) - defer SetThreshold(1024) + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - mapSize := 1 - keyStringSize := maxInlineMapKeySize + 10 // key size is more than max map key size - valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - r := newRand(t) + childMapSeed := childMap.root.ExtraData().Seed - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, int(keyStringSize))) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v - } + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(0), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - typeInfo := 
testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.Equal(t, uint64(1), parentMap.Count()) + require.Equal(t, typeInfo, parentMap.Type()) + require.True(t, parentMap.root.IsData()) + require.False(t, parentMap.Inlined()) + + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, typeInfo, childMap.Type()) + require.True(t, childMap.root.IsData()) + require.True(t, childMap.Inlined()) + + err = childMap.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, childMap.Type()) + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, childMapSeed, childMap.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + testExistingInlinedMapSetType( + t, + parentMap.SlabID(), + Uint64Value(0), + storage.baseStorage, + newTypeInfo, + childMap.Count(), + childMapSeed, + ) + }) + + t.Run("inlined in parent container non-root data slab", func(t *testing.T) { storage := newTestPersistentStorage(t) - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + childMapSeed := childMap.root.ExtraData().Seed + + mapSize := 10_000 + for i := 0; i < mapSize-1; i++ { + v := Uint64Value(i) + existingStorable, err := parentMap.Set(compare, hashInputProvider, v, v) require.NoError(t, err) require.Nil(t, existingStorable) } - // Key is stored in separate storable slabs, while value is stored in map slab. 
- require.Equal(t, 2, len(storage.deltas)) + existingStorable, err := parentMap.Set(compare, hashInputProvider, Uint64Value(mapSize-1), childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.Equal(t, typeInfo, parentMap.Type()) + require.False(t, parentMap.root.IsData()) + require.False(t, parentMap.Inlined()) + + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, typeInfo, childMap.Type()) + require.True(t, childMap.root.IsData()) + require.True(t, childMap.Inlined()) + + err = childMap.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, newTypeInfo, childMap.Type()) + require.Equal(t, uint64(0), childMap.Count()) + require.Equal(t, childMapSeed, childMap.root.ExtraData().Seed) + + // Commit modified slabs in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + testExistingInlinedMapSetType( + t, + parentMap.SlabID(), + Uint64Value(mapSize-1), + storage.baseStorage, + newTypeInfo, + childMap.Count(), + childMapSeed, + ) }) } -func TestMapID(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} +func testExistingMapSetType( + t *testing.T, + id SlabID, + baseStorage BaseStorage, + expectedTypeInfo testTypeInfo, + expectedCount uint64, + expectedSeed uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) + + // Load existing map by ID + m, err := NewMapWithRootID(storage, id, newBasicDigesterBuilder()) require.NoError(t, err) + require.Equal(t, expectedCount, m.Count()) + require.Equal(t, expectedTypeInfo, m.Type()) + require.Equal(t, expectedSeed, m.root.ExtraData().Seed) - sid := 
m.SlabID() - id := m.ValueID() + // Modify type info of existing map + err = m.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, m.Count()) + require.Equal(t, newTypeInfo, m.Type()) + require.Equal(t, expectedSeed, m.root.ExtraData().Seed) - require.Equal(t, sid.address[:], id[:8]) - require.Equal(t, sid.index[:], id[8:]) + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Load existing map again from storage + m2, err := NewMapWithRootID(storage2, id, newBasicDigesterBuilder()) + require.NoError(t, err) + require.Equal(t, expectedCount, m2.Count()) + require.Equal(t, newTypeInfo, m2.Type()) + require.Equal(t, expectedSeed, m2.root.ExtraData().Seed) } -func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { - const ( - mapSize = 3 - keyStringSize = 16 - initialStorableSize = 1 - mutatedStorableSize = 5 - ) +func testExistingInlinedMapSetType( + t *testing.T, + parentID SlabID, + inlinedChildKey Value, + baseStorage BaseStorage, + expectedTypeInfo testTypeInfo, + expectedCount uint64, + expectedSeed uint64, +) { + newTypeInfo := testTypeInfo{value: expectedTypeInfo.value + 1} - keyValues := make(map[Value]*mutableValue, mapSize) - for i := 0; i < mapSize; i++ { - k := Uint64Value(i) - v := newMutableValue(initialStorableSize) - keyValues[k] = v - } + // Create storage from existing data + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + // Load existing map by ID + parentMap, err := NewMapWithRootID(storage, parentID, newBasicDigesterBuilder()) + require.NoError(t, err) - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + element, err := parentMap.Get(compare, hashInputProvider, inlinedChildKey) 
require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + childMap, ok := element.(*OrderedMap) + require.True(t, ok) - require.True(t, m.root.IsData()) + require.Equal(t, expectedCount, childMap.Count()) + require.Equal(t, expectedTypeInfo, childMap.Type()) + require.Equal(t, expectedSeed, childMap.root.ExtraData().Seed) - expectedElementSize := singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + initialStorableSize - expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize - require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) + // Modify type info of existing map + err = childMap.SetType(newTypeInfo) + require.NoError(t, err) + require.Equal(t, expectedCount, childMap.Count()) + require.Equal(t, newTypeInfo, childMap.Type()) + require.Equal(t, expectedSeed, childMap.root.ExtraData().Seed) - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + // Commit data in storage + err = storage.FastCommit(runtime.NumCPU()) require.NoError(t, err) - // Reset mutable values after changing its storable size - for k, v := range keyValues { - v.updateStorableSize(mutatedStorableSize) + // Create storage from existing data + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.NotNil(t, existingStorable) - } + // Load existing map again from storage + parentMap2, err := NewMapWithRootID(storage2, parentID, newBasicDigesterBuilder()) + require.NoError(t, err) - require.True(t, m.root.IsData()) + element2, err := parentMap2.Get(compare, hashInputProvider, inlinedChildKey) + require.NoError(t, err) - expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize - 
expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize
-	require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize())
+	childMap2, ok := element2.(*OrderedMap)
+	require.True(t, ok)

-	err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider)
-	require.NoError(t, err)
+	require.Equal(t, expectedCount, childMap2.Count())
+	require.Equal(t, newTypeInfo, childMap2.Type())
+	require.Equal(t, expectedSeed, childMap2.root.ExtraData().Seed)
 }
diff --git a/storable.go b/storable.go
index d200c9b..59e41ce 100644
--- a/storable.go
+++ b/storable.go
@@ -37,19 +37,55 @@ type Storable interface {
 	ChildStorables() []Storable
 }

-type containerStorable interface {
+// ComparableStorable is an interface that supports comparison and cloning of Storable.
+// This is only used for compact keys.
+type ComparableStorable interface {
 	Storable
-	hasPointer() bool
+
+	// Equal returns true if the given storable is equal to this storable.
+	Equal(Storable) bool
+
+	// Less returns true if the given storable is less than this storable.
+	Less(Storable) bool
+
+	// ID returns a unique identifier.
+	ID() string
+
+	Copy() Storable
+}
+
+// ContainerStorable is an interface that supports Storable containing other storables.
+type ContainerStorable interface {
+	Storable
+
+	// HasPointer returns true if any of its child storables is SlabIDStorable
+	// (references to another slab). This function is used during encoding.
+	HasPointer() bool
 }

 func hasPointer(storable Storable) bool {
-	if cs, ok := storable.(containerStorable); ok {
-		return cs.hasPointer()
+	if cs, ok := storable.(ContainerStorable); ok {
+		return cs.HasPointer()
 	}
 	return false
 }

 const (
+	// WARNING: tag numbers defined here in github.com/onflow/atree
+	// MUST not overlap with tag numbers used by Cadence internal value encoding.
+	// As of Oct. 2, 2023, Cadence uses tag numbers from 128 to 224.
+ // See runtime/interpreter/encode.go at github.com/onflow/cadence. + + CBORTagTypeInfoRef = 246 + + CBORTagInlinedArrayExtraData = 247 + CBORTagInlinedMapExtraData = 248 + CBORTagInlinedCompactMapExtraData = 249 + + CBORTagInlinedArray = 250 + CBORTagInlinedMap = 251 + CBORTagInlinedCompactMap = 252 + CBORTagInlineCollisionGroup = 253 CBORTagExternalCollisionGroup = 254 @@ -58,9 +94,9 @@ const ( type SlabIDStorable SlabID -var _ Storable = SlabIDStorable{} +var _ ContainerStorable = SlabIDStorable{} -func (v SlabIDStorable) hasPointer() bool { +func (v SlabIDStorable) HasPointer() bool { return true } @@ -126,12 +162,11 @@ func (v SlabIDStorable) String() string { return fmt.Sprintf("SlabIDStorable(%d)", v) } -// Encode is a wrapper for Storable.Encode() -func Encode(storable Storable, encMode cbor.EncMode) ([]byte, error) { +func EncodeSlab(slab Slab, encMode cbor.EncMode) ([]byte, error) { var buf bytes.Buffer enc := NewEncoder(&buf, encMode) - err := storable.Encode(enc) + err := slab.Encode(enc) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable") diff --git a/storable_slab.go b/storable_slab.go index de799ff..162c258 100644 --- a/storable_slab.go +++ b/storable_slab.go @@ -50,10 +50,9 @@ func NewStorableSlab(storage SlabStorage, address Address, storable Storable) (S storable: storable, } - err = storage.Store(id, slab) + err = storeSlab(storage, slab) if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
- return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + return nil, err } return SlabIDStorable(id), nil @@ -93,6 +92,10 @@ func (s *StorableSlab) Encode(enc *Encoder) error { return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode storable") } + if enc.hasInlinedExtraData() { + return NewEncodingError(fmt.Errorf("failed to encode storable slab because storable contains inlined array/map")) + } + return nil } diff --git a/storable_test.go b/storable_test.go index 341e054..6072e83 100644 --- a/storable_test.go +++ b/storable_test.go @@ -34,6 +34,7 @@ const ( cborTagUInt32Value = 163 cborTagUInt64Value = 164 cborTagSomeValue = 165 + cborTagHashableMap = 166 ) type HashableValue interface { @@ -333,6 +334,7 @@ type StringValue struct { var _ Value = StringValue{} var _ Storable = StringValue{} var _ HashableValue = StringValue{} +var _ ComparableStorable = StringValue{} func NewStringValue(s string) StringValue { size := GetUintCBORSize(uint64(len(s))) + uint32(len(s)) @@ -345,6 +347,28 @@ func (v StringValue) StoredValue(_ SlabStorage) (Value, error) { return v, nil } +func (v StringValue) Equal(other Storable) bool { + if _, ok := other.(StringValue); !ok { + return false + } + return v.str == other.(StringValue).str +} + +func (v StringValue) Less(other Storable) bool { + if _, ok := other.(StringValue); !ok { + return false + } + return v.str < other.(StringValue).str +} + +func (v StringValue) ID() string { + return v.str +} + +func (v StringValue) Copy() Storable { + return v +} + func (v StringValue) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) { if uint64(v.ByteSize()) > maxInlineSize { @@ -430,7 +454,7 @@ func (v StringValue) String() string { return v.str } -func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, id SlabID, inlinedExtraData []ExtraData) (Storable, error) { t, err := dec.NextType() if 
err != nil { return nil, err @@ -451,6 +475,15 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { } switch tagNumber { + case CBORTagInlinedArray: + return DecodeInlinedArrayStorable(dec, decodeStorable, id, inlinedExtraData) + + case CBORTagInlinedMap: + return DecodeInlinedMapStorable(dec, decodeStorable, id, inlinedExtraData) + + case CBORTagInlinedCompactMap: + return DecodeInlinedCompactMapStorable(dec, decodeStorable, id, inlinedExtraData) + case CBORTagSlabID: return DecodeSlabIDStorable(dec) @@ -492,7 +525,7 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { return Uint64Value(n), nil case cborTagSomeValue: - storable, err := decodeStorable(dec, id) + storable, err := decodeStorable(dec, id, inlinedExtraData) if err != nil { return nil, err } @@ -507,12 +540,43 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { } func decodeTypeInfo(dec *cbor.StreamDecoder) (TypeInfo, error) { - value, err := dec.DecodeUint64() + t, err := dec.NextType() if err != nil { return nil, err } - return testTypeInfo{value: value}, nil + switch t { + case cbor.UintType: + value, err := dec.DecodeUint64() + if err != nil { + return nil, err + } + + return testTypeInfo{value: value}, nil + + case cbor.TagType: + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return nil, err + } + + switch tagNum { + case testCompositeTypeInfoTagNum: + value, err := dec.DecodeUint64() + if err != nil { + return nil, err + } + + return testCompositeTypeInfo{value: value}, nil + + default: + return nil, fmt.Errorf("failed to decode type info") + } + + default: + return nil, fmt.Errorf("failed to decode type info") + } + } func compare(storage SlabStorage, value Value, storable Storable) (bool, error) { @@ -571,6 +635,19 @@ func compare(storage SlabStorage, value Value, storable Storable) (bool, error) } return compare(storage, v.Value, other.Storable) + + case *HashableMap: + other, err := 
storable.StoredValue(storage) + if err != nil { + return false, err + } + + otherMap, ok := other.(*OrderedMap) + if !ok { + return false, nil + } + + return v.m.ValueID() == otherMap.ValueID(), nil } return false, fmt.Errorf("value %T not supported for comparison", value) @@ -635,11 +712,11 @@ type SomeStorable struct { Storable Storable } -var _ Storable = SomeStorable{} +var _ ContainerStorable = SomeStorable{} -func (v SomeStorable) hasPointer() bool { - if ms, ok := v.Storable.(containerStorable); ok { - return ms.hasPointer() +func (v SomeStorable) HasPointer() bool { + if ms, ok := v.Storable.(ContainerStorable); ok { + return ms.HasPointer() } return false } @@ -677,25 +754,25 @@ func (v SomeStorable) String() string { return fmt.Sprintf("%s", v.Storable) } -type mutableValue struct { +type testMutableValue struct { storable *mutableStorable } -var _ Value = &mutableValue{} +var _ Value = &testMutableValue{} -func newMutableValue(storableSize uint32) *mutableValue { - return &mutableValue{ +func newTestMutableValue(storableSize uint32) *testMutableValue { + return &testMutableValue{ storable: &mutableStorable{ size: storableSize, }, } } -func (v *mutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) { +func (v *testMutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) { return v.storable, nil } -func (v *mutableValue) updateStorableSize(n uint32) { +func (v *testMutableValue) updateStorableSize(n uint32) { v.storable.size = n } @@ -710,7 +787,7 @@ func (s *mutableStorable) ByteSize() uint32 { } func (s *mutableStorable) StoredValue(SlabStorage) (Value, error) { - return &mutableValue{s}, nil + return &testMutableValue{s}, nil } func (*mutableStorable) ChildStorables() []Storable { @@ -721,3 +798,47 @@ func (*mutableStorable) Encode(*Encoder) error { // no-op for testing return nil } + +type HashableMap struct { + m *OrderedMap +} + +var _ Value = &HashableMap{} +var _ HashableValue = &HashableMap{} + +func NewHashableMap(m 
*OrderedMap) *HashableMap { + return &HashableMap{m} +} + +func (v *HashableMap) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) { + return v.m.Storable(storage, address, maxInlineSize) +} + +func (v *HashableMap) HashInput(scratch []byte) ([]byte, error) { + const ( + cborTypeByteString = 0x40 + + valueIDLength = len(ValueID{}) + cborTagNumSize = 2 + cborByteStringHeadSize = 1 + cborByteStringSize = valueIDLength + hashInputSize = cborTagNumSize + cborByteStringHeadSize + cborByteStringSize + ) + + var buf []byte + if len(scratch) >= hashInputSize { + buf = scratch[:hashInputSize] + } else { + buf = make([]byte, hashInputSize) + } + + // CBOR tag number + buf[0], buf[1] = 0xd8, cborTagHashableMap + + // CBOR byte string head + buf[2] = cborTypeByteString | byte(valueIDLength) + + vid := v.m.ValueID() + copy(buf[3:], vid[:]) + return buf, nil +} diff --git a/storage.go b/storage.go index 64c7f3a..b4cf7af 100644 --- a/storage.go +++ b/storage.go @@ -21,19 +21,50 @@ package atree import ( "bytes" "encoding/binary" + "errors" "fmt" "sort" "strings" "sync" + "unsafe" "github.com/fxamacker/cbor/v2" ) const LedgerBaseStorageSlabPrefix = "$" -// ValueID identifies Array and OrderedMap. -type ValueID [16]byte +// ValueID identifies an Array or OrderedMap. ValueID is consistent +// independent of inlining status, while ValueID and SlabID are used +// differently despite having the same size and content under the hood. +// By contrast, SlabID is affected by inlining because it identifies +// a slab in storage. Given this, ValueID should be used for +// resource tracking, etc. 
+type ValueID [unsafe.Sizeof(Address{}) + unsafe.Sizeof(SlabIndex{})]byte + +var emptyValueID = ValueID{} + +func slabIDToValueID(sid SlabID) ValueID { + var id ValueID + n := copy(id[:], sid.address[:]) + copy(id[n:], sid.index[:]) + return id +} + +func (vid ValueID) equal(sid SlabID) bool { + return bytes.Equal(vid[:len(sid.address)], sid.address[:]) && + bytes.Equal(vid[len(sid.address):], sid.index[:]) +} + +func (vid ValueID) String() string { + return fmt.Sprintf( + "0x%x.%d", + binary.BigEndian.Uint64(vid[:8]), + binary.BigEndian.Uint64(vid[8:]), + ) +} +// WARNING: Any changes to SlabID or its components (Address and SlabIndex) +// require updates to ValueID definition and functions. type ( Address [8]byte SlabIndex [8]byte @@ -360,7 +391,7 @@ func (s *BasicSlabStorage) SlabIDs() []SlabID { func (s *BasicSlabStorage) Encode() (map[SlabID][]byte, error) { m := make(map[SlabID][]byte) for id, slab := range s.Slabs { - b, err := Encode(slab, s.cborEncMode) + b, err := EncodeSlab(slab, s.cborEncMode) if err != nil { // err is already categorized by Encode(). return nil, err @@ -448,6 +479,8 @@ func CheckStorageHealth(storage SlabStorage, expectedNumberOfRootSlabs int) (map atLeastOneExternalSlab = true } + // This handles inlined slab because inlined slab is a child storable (s) and + // we traverse s.ChildStorables() for its inlined elements. next = append(next, s.ChildStorables()...) } @@ -574,6 +607,11 @@ func (s *PersistentSlabStorage) SlabIterator() (SlabIterator, error) { slabIDStorable, ok := childStorable.(SlabIDStorable) if !ok { + // Append child storables of this childStorable to handle inlined slab containing SlabIDStorable. 
+ nextChildStorables = append( + nextChildStorables, + childStorable.ChildStorables()..., + ) continue } @@ -739,12 +777,17 @@ func (s *PersistentSlabStorage) sortedOwnedDeltaKeys() []SlabID { } func (s *PersistentSlabStorage) Commit() error { - var err error // this part ensures the keys are sorted so commit operation is deterministic keysWithOwners := s.sortedOwnedDeltaKeys() - for _, id := range keysWithOwners { + return s.commit(keysWithOwners) +} + +func (s *PersistentSlabStorage) commit(keys []SlabID) error { + var err error + + for _, id := range keys { slab := s.deltas[id] // deleted slabs @@ -763,7 +806,7 @@ func (s *PersistentSlabStorage) Commit() error { } // serialize - data, err := Encode(slab, s.cborEncMode) + data, err := EncodeSlab(slab, s.cborEncMode) if err != nil { // err is categorized already by Encode() return err @@ -842,7 +885,7 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error { continue } // serialize - data, err := Encode(slab, s.cborEncMode) + data, err := EncodeSlab(slab, s.cborEncMode) results <- &encodedSlabs{ slabID: id, data: data, @@ -873,7 +916,7 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error { // process the results while encoders are working // we need to capture them inside a map // again so we can apply them in order of keys - encSlabByID := make(map[SlabID][]byte) + encSlabByID := make(map[SlabID][]byte, len(keysWithOwners)) for i := 0; i < len(keysWithOwners); i++ { result := <-results // if any error return @@ -926,6 +969,206 @@ func (s *PersistentSlabStorage) FastCommit(numWorkers int) error { return nil } +// NondeterministicFastCommit commits changed slabs in nondeterministic order. +// Encoded slab data is deterministic (e.g. array and map iteration is deterministic). +// IMPORTANT: This function is used by migration programs when commit order of slabs +// is not required to be deterministic (while preserving deterministic array and map iteration). 
+func (s *PersistentSlabStorage) NondeterministicFastCommit(numWorkers int) error { + // No changes + if len(s.deltas) == 0 { + return nil + } + + type slabToBeEncoded struct { + slabID SlabID + slab Slab + } + + type encodedSlab struct { + slabID SlabID + data []byte + err error + } + + // Define encoder (worker) to encode slabs in parallel + encoder := func( + wg *sync.WaitGroup, + done <-chan struct{}, + jobs <-chan slabToBeEncoded, + results chan<- encodedSlab, + ) { + defer wg.Done() + + for job := range jobs { + // Check if goroutine is signaled to stop before proceeding. + select { + case <-done: + return + default: + } + + id := job.slabID + slab := job.slab + + if slab == nil { + results <- encodedSlab{ + slabID: id, + data: nil, + err: nil, + } + continue + } + + // Serialize + data, err := EncodeSlab(slab, s.cborEncMode) + results <- encodedSlab{ + slabID: id, + data: data, + err: err, + } + } + } + + // slabIDsWithOwner contains slab IDs with owner: + // - modified slab IDs are stored from front to back + // - deleted slab IDs are stored from back to front + // This is to avoid extra allocations. + slabIDsWithOwner := make([]SlabID, len(s.deltas)) + + // Modified slabs need to be encoded (in parallel) and stored in underlying storage. + modifiedSlabCount := 0 + // Deleted slabs need to be removed from underlying storage. + deletedSlabCount := 0 + for id, slab := range s.deltas { + // Ignore slabs not owned by accounts + if id.address == AddressUndefined { + continue + } + if slab == nil { + // Set deleted slab ID from the end of slabIDsWithOwner. + index := len(slabIDsWithOwner) - 1 - deletedSlabCount + slabIDsWithOwner[index] = id + deletedSlabCount++ + } else { + // Set modified slab ID from the start of slabIDsWithOwner. 
+ slabIDsWithOwner[modifiedSlabCount] = id + modifiedSlabCount++ + } + } + + modifiedSlabIDs := slabIDsWithOwner[:modifiedSlabCount] + + deletedSlabIDs := slabIDsWithOwner[len(slabIDsWithOwner)-deletedSlabCount:] + + if modifiedSlabCount == 0 && deletedSlabCount == 0 { + return nil + } + + if modifiedSlabCount < 2 { + // Avoid goroutine overhead. + // Return after committing modified and deleted slabs. + ids := modifiedSlabIDs + ids = append(ids, deletedSlabIDs...) + return s.commit(ids) + } + + if numWorkers > modifiedSlabCount { + numWorkers = modifiedSlabCount + } + + var wg sync.WaitGroup + + // Create done signal channel + done := make(chan struct{}) + + // Create job queue + jobs := make(chan slabToBeEncoded, modifiedSlabCount) + + // Create result queue + results := make(chan encodedSlab, modifiedSlabCount) + + defer func() { + // This ensures that all goroutines are stopped before output channel is closed. + + // Wait for all goroutines to finish + wg.Wait() + + // Close output channel + close(results) + }() + + // Launch workers to encode slabs + wg.Add(numWorkers) + for i := 0; i < numWorkers; i++ { + go encoder(&wg, done, jobs, results) + } + + // Send jobs + for _, id := range modifiedSlabIDs { + jobs <- slabToBeEncoded{id, s.deltas[id]} + } + close(jobs) + + // Remove deleted slabs from underlying storage. + for _, id := range deletedSlabIDs { + + err := s.baseStorage.Remove(id) + if err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // Wrap err as external error (if needed) because err is returned by BaseStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", id)) + } + + // Deleted slabs are removed from deltas and added to read cache so that: + // 1. next read is from in-memory read cache + // 2. 
deleted slabs are not re-committed in next commit
+		s.cache[id] = nil
+		delete(s.deltas, id)
+	}
+
+	// Process encoded slabs
+	for i := 0; i < modifiedSlabCount; i++ {
+		result := <-results
+
+		if result.err != nil {
+			// Closing done channel signals goroutines to stop.
+			close(done)
+			// result.err is already categorized by Encode().
+			return result.err
+		}
+
+		id := result.slabID
+		data := result.data
+
+		if data == nil {
+			// Closing done channel signals goroutines to stop.
+			close(done)
+			// This is unexpected because deleted slabs are processed separately.
+			return NewEncodingErrorf("unexpected encoded empty data")
+		}
+
+		// Store
+		err := s.baseStorage.Store(id, data)
+		if err != nil {
+			// Closing done channel signals goroutines to stop.
+			close(done)
+			// Wrap err as external error (if needed) because err is returned by BaseStorage interface.
+			return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id))
+		}
+
+		s.cache[id] = s.deltas[id]
+		// It's safe to remove slab from deltas because
+		// iteration is on non-temp slabs and temp slabs
+		// are still in deltas.
+		delete(s.deltas, id)
+	}
+
+	// Do NOT reset deltas because slabs with empty address are not saved.
+ + return nil +} + func (s *PersistentSlabStorage) DropDeltas() { s.deltas = make(map[SlabID]Slab) } @@ -989,12 +1232,18 @@ func (s *PersistentSlabStorage) Retrieve(id SlabID) (Slab, bool, error) { } func (s *PersistentSlabStorage) Store(id SlabID, slab Slab) error { + if id == SlabIDUndefined { + return NewSlabIDError("failed to store slab with undefined slab ID") + } // add to deltas s.deltas[id] = slab return nil } func (s *PersistentSlabStorage) Remove(id SlabID) error { + if id == SlabIDUndefined { + return NewSlabIDError("failed to remove slab with undefined slab ID") + } // add to nil to deltas under that id s.deltas[id] = nil return nil @@ -1035,3 +1284,458 @@ func (s *PersistentSlabStorage) DeltasSizeWithoutTempAddresses() uint64 { } return size } + +func storeSlab(storage SlabStorage, slab Slab) error { + id := slab.SlabID() + err := storage.Store(id, slab) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", id)) + } + return nil +} + +// FixLoadedBrokenReferences traverses loaded slabs and fixes broken references in maps. +// A broken reference is a SlabID referencing a non-existent slab. +// To fix a map containing broken references, this function replaces broken map with +// empty map having the same SlabID and also removes all slabs in the old map. +// Limitations: +// - only fix broken references in map +// - only traverse loaded slabs in deltas and cache +// NOTE: The intended use case is to enable migration programs in onflow/flow-go to +// fix broken references. As of April 2024, only 10 registers in testnet (not mainnet) +// were found to have broken references and they seem to have resulted from a bug +// that was fixed 2 years ago by https://github.com/onflow/cadence/pull/1565. 
+func (s *PersistentSlabStorage) FixLoadedBrokenReferences(needToFix func(old Value) bool) ( + fixedSlabIDs map[SlabID][]SlabID, // key: root slab ID, value: slab IDs containing broken refs + skippedSlabIDs map[SlabID][]SlabID, // key: root slab ID, value: slab IDs containing broken refs + err error, +) { + + // parentOf is used to find root slab from non-root slab. + // Broken reference can be in non-root slab, and we need SlabID of root slab + // to replace broken map by creating an empty new map with same SlabID. + parentOf := make(map[SlabID]SlabID) + + getRootSlabID := func(id SlabID) SlabID { + for { + parentID, ok := parentOf[id] + if ok { + id = parentID + } else { + return id + } + } + } + + hasBrokenReferenceInSlab := func(id SlabID, slab Slab) bool { + if slab == nil { + return false + } + + switch slab.(type) { + case *ArrayMetaDataSlab, *MapMetaDataSlab: // metadata slabs + var foundBrokenRef bool + + for _, childStorable := range slab.ChildStorables() { + + if slabIDStorable, ok := childStorable.(SlabIDStorable); ok { + + childID := SlabID(slabIDStorable) + + // Track parent-child relationship of root slabs and non-root slabs. + parentOf[childID] = id + + if !s.existIfLoaded(childID) { + foundBrokenRef = true + } + + // Continue with remaining child storables to track parent-child relationship. + } + } + + return foundBrokenRef + + default: // data slabs + childStorables := slab.ChildStorables() + + for len(childStorables) > 0 { + + var nextChildStorables []Storable + + for _, childStorable := range childStorables { + + if slabIDStorable, ok := childStorable.(SlabIDStorable); ok { + + if !s.existIfLoaded(SlabID(slabIDStorable)) { + return true + } + + continue + } + + // Append child storables of this childStorable to + // handle nested SlabIDStorable, such as Cadence SomeValue. 
+ nextChildStorables = append( + nextChildStorables, + childStorable.ChildStorables()..., + ) + } + + childStorables = nextChildStorables + } + + return false + } + } + + var brokenSlabIDs []SlabID + + // Iterate delta slabs. + for id, slab := range s.deltas { + if hasBrokenReferenceInSlab(id, slab) { + brokenSlabIDs = append(brokenSlabIDs, id) + } + } + + // Iterate cache slabs. + for id, slab := range s.cache { + if _, ok := s.deltas[id]; ok { + continue + } + if hasBrokenReferenceInSlab(id, slab) { + brokenSlabIDs = append(brokenSlabIDs, id) + } + } + + if len(brokenSlabIDs) == 0 { + return nil, nil, nil + } + + rootSlabIDsWithBrokenData := make(map[SlabID][]SlabID) + var errs []error + + // Find SlabIDs of root slab for slabs containing broken references. + for _, id := range brokenSlabIDs { + rootID := getRootSlabID(id) + if rootID == SlabIDUndefined { + errs = append(errs, fmt.Errorf("failed to get root slab id for slab %s", id)) + continue + } + rootSlabIDsWithBrokenData[rootID] = append(rootSlabIDsWithBrokenData[rootID], id) + } + + for rootSlabID, brokenSlabIDs := range rootSlabIDsWithBrokenData { + rootSlab := s.RetrieveIfLoaded(rootSlabID) + if rootSlab == nil { + errs = append(errs, fmt.Errorf("failed to retrieve loaded root slab %s", rootSlabID)) + continue + } + + switch rootSlab := rootSlab.(type) { + case MapSlab: + value, err := rootSlab.StoredValue(s) + if err != nil { + errs = append(errs, fmt.Errorf("failed to convert slab %s into value", rootSlab.SlabID())) + continue + } + + if needToFix(value) { + err := s.fixBrokenReferencesInMap(rootSlab) + if err != nil { + errs = append(errs, err) + continue + } + } else { + if skippedSlabIDs == nil { + skippedSlabIDs = make(map[SlabID][]SlabID) + } + skippedSlabIDs[rootSlabID] = brokenSlabIDs + } + + default: + // IMPORTANT: Only handle map slabs for now. DO NOT silently fix currently unknown problems. 
+			errs = append(errs, fmt.Errorf("failed to fix broken references in non-map slab %s (%T)", rootSlab.SlabID(), rootSlab))
+		}
+	}
+
+	for id := range skippedSlabIDs {
+		delete(rootSlabIDsWithBrokenData, id)
+	}
+
+	return rootSlabIDsWithBrokenData, skippedSlabIDs, errors.Join(errs...)
+}
+
+// fixBrokenReferencesInMap replaces broken map with empty map
+// having the same SlabID and also removes all slabs in the old map.
+func (s *PersistentSlabStorage) fixBrokenReferencesInMap(old MapSlab) error {
+	id := old.SlabID()
+
+	oldExtraData := old.ExtraData()
+
+	// Create an empty map with the same SlabID, type, and seed as the old map.
+	new := &MapDataSlab{
+		header: MapSlabHeader{
+			slabID: id,
+			size:   mapRootDataSlabPrefixSize + hkeyElementsPrefixSize,
+		},
+		extraData: &MapExtraData{
+			TypeInfo: oldExtraData.TypeInfo,
+			Seed:     oldExtraData.Seed,
+		},
+		elements: newHkeyElements(0),
+	}
+
+	// Store new empty map with the same SlabID.
+	err := s.Store(id, new)
+	if err != nil {
+		return err
+	}
+
+	// Remove all slabs and references in old map.
+	references, _, err := s.getAllChildReferences(old)
+	if err != nil {
+		return err
+	}
+
+	for _, childID := range references {
+		err = s.Remove(childID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *PersistentSlabStorage) existIfLoaded(id SlabID) bool {
+	// Check deltas.
+	if slab, ok := s.deltas[id]; ok {
+		return slab != nil
+	}
+
+	// Check read cache.
+	if slab, ok := s.cache[id]; ok {
+		return slab != nil
+	}
+
+	return false
+}
+
+// GetAllChildReferences returns child references of given slab (all levels),
+// including nested container and their child references.
+func (s *PersistentSlabStorage) GetAllChildReferences(id SlabID) (
+	references []SlabID,
+	brokenReferences []SlabID,
+	err error,
+) {
+	slab, found, err := s.Retrieve(id)
+	if err != nil {
+		return nil, nil, err
+	}
+	if !found {
+		return nil, nil, NewSlabNotFoundErrorf(id, fmt.Sprintf("failed to get root slab by id %s", id))
+	}
+	return s.getAllChildReferences(slab)
+}
+
+// getAllChildReferences returns child references of given slab (all levels).
+func (s *PersistentSlabStorage) getAllChildReferences(slab Slab) (
+	references []SlabID,
+	brokenReferences []SlabID,
+	err error,
+) {
+	childStorables := slab.ChildStorables()
+
+	for len(childStorables) > 0 {
+
+		var nextChildStorables []Storable
+
+		for _, childStorable := range childStorables {
+
+			slabIDStorable, ok := childStorable.(SlabIDStorable)
+			if !ok {
+				nextChildStorables = append(
+					nextChildStorables,
+					childStorable.ChildStorables()...,
+				)
+
+				continue
+			}
+
+			childID := SlabID(slabIDStorable)
+
+			childSlab, ok, err := s.Retrieve(childID)
+			if err != nil {
+				return nil, nil, err
+			}
+			if !ok {
+				brokenReferences = append(brokenReferences, childID)
+				continue
+			}
+
+			references = append(references, childID)
+
+			nextChildStorables = append(
+				nextChildStorables,
+				childSlab.ChildStorables()...,
+			)
+		}
+
+		childStorables = nextChildStorables
+	}
+
+	return references, brokenReferences, nil
+}
+
+// BatchPreload decodes and caches slabs of given ids in parallel.
+// This is useful for storage health or data validation in migration programs.
+func (s *PersistentSlabStorage) BatchPreload(ids []SlabID, numWorkers int) error {
+	if len(ids) == 0 {
+		return nil
+	}
+
+	// Use 11 for min slab count for parallel decoding because micro benchmarks showed
+	// performance regression for <= 10 slabs when decoding slabs in parallel.
+ const minCountForBatchPreload = 11 + if len(ids) < minCountForBatchPreload { + + for _, id := range ids { + // fetch from base storage last + data, ok, err := s.baseStorage.Retrieve(id) + if err != nil { + // Wrap err as external error (if needed) because err is returned by BaseStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id)) + } + if !ok { + continue + } + + slab, err := DecodeSlab(id, data, s.cborDecMode, s.DecodeStorable, s.DecodeTypeInfo) + if err != nil { + // err is already categorized by DecodeSlab(). + return err + } + + // save decoded slab to cache + s.cache[id] = slab + } + + return nil + } + + type slabToBeDecoded struct { + slabID SlabID + data []byte + } + + type decodedSlab struct { + slabID SlabID + slab Slab + err error + } + + // Define decoder (worker) to decode slabs in parallel + decoder := func(wg *sync.WaitGroup, done <-chan struct{}, jobs <-chan slabToBeDecoded, results chan<- decodedSlab) { + defer wg.Done() + + for slabData := range jobs { + // Check if goroutine is signaled to stop before proceeding. + select { + case <-done: + return + default: + } + + id := slabData.slabID + data := slabData.data + + slab, err := DecodeSlab(id, data, s.cborDecMode, s.DecodeStorable, s.DecodeTypeInfo) + // err is already categorized by DecodeSlab(). + results <- decodedSlab{ + slabID: id, + slab: slab, + err: err, + } + } + } + + if numWorkers > len(ids) { + numWorkers = len(ids) + } + + var wg sync.WaitGroup + + // Construct done signal channel + done := make(chan struct{}) + + // Construct job queue + jobs := make(chan slabToBeDecoded, len(ids)) + + // Construct result queue + results := make(chan decodedSlab, len(ids)) + + defer func() { + // This ensures that all goroutines are stopped before output channel is closed. 
+ + // Wait for all goroutines to finish + wg.Wait() + + // Close output channel + close(results) + }() + + // Preallocate cache map if empty + if len(s.cache) == 0 { + s.cache = make(map[SlabID]Slab, len(ids)) + } + + // Launch workers + wg.Add(numWorkers) + for i := 0; i < numWorkers; i++ { + go decoder(&wg, done, jobs, results) + } + + // Send jobs + jobCount := 0 + { + // Need to close input channel (jobs) here because + // if there isn't any job in jobs channel, + // done is never processed inside loop "for slabData := range jobs". + defer close(jobs) + + for _, id := range ids { + // fetch from base storage last + data, ok, err := s.baseStorage.Retrieve(id) + if err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // Wrap err as external error (if needed) because err is returned by BaseStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to retrieve slab %s", id)) + } + if !ok { + continue + } + + jobs <- slabToBeDecoded{id, data} + jobCount++ + } + } + + // Process results + for i := 0; i < jobCount; i++ { + result := <-results + + if result.err != nil { + // Closing done channel signals goroutines to stop. + close(done) + // result.err is already categorized by DecodeSlab(). + return result.err + } + + // save decoded slab to cache + s.cache[result.slabID] = result.slab + } + + return nil +} diff --git a/storage_bench_test.go b/storage_bench_test.go new file mode 100644 index 0000000..0736b75 --- /dev/null +++ b/storage_bench_test.go @@ -0,0 +1,253 @@ +/* + * Atree - Scalable Arrays and Ordered Maps + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package atree + +import ( + "encoding/binary" + "math/rand" + "runtime" + "strconv" + "testing" + + "github.com/fxamacker/cbor/v2" + "github.com/stretchr/testify/require" +) + +func benchmarkFastCommit(b *testing.B, seed int64, numberOfSlabs int) { + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + slabs := make([]Slab, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + slabs[i] = generateLargeSlab(id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + + for _, slab := range slabs { + err = storage.Store(slab.SlabID(), slab) + require.NoError(b, err) + } + + b.StartTimer() + + err := storage.FastCommit(runtime.NumCPU()) + require.NoError(b, err) + } + }) +} + +func benchmarkNondeterministicFastCommit(b *testing.B, seed int64, numberOfSlabs int) { + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + slabs := make([]Slab, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + 
binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + slabs[i] = generateLargeSlab(id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + + for _, slab := range slabs { + err = storage.Store(slab.SlabID(), slab) + require.NoError(b, err) + } + + b.StartTimer() + + err := storage.NondeterministicFastCommit(runtime.NumCPU()) + require.NoError(b, err) + } + }) +} + +func BenchmarkStorageFastCommit(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. + + benchmarkFastCommit(b, fixedSeed, 10) + benchmarkFastCommit(b, fixedSeed, 100) + benchmarkFastCommit(b, fixedSeed, 1_000) + benchmarkFastCommit(b, fixedSeed, 10_000) + benchmarkFastCommit(b, fixedSeed, 100_000) + benchmarkFastCommit(b, fixedSeed, 1_000_000) +} + +func BenchmarkStorageNondeterministicFastCommit(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. 
+ + benchmarkNondeterministicFastCommit(b, fixedSeed, 10) + benchmarkNondeterministicFastCommit(b, fixedSeed, 100) + benchmarkNondeterministicFastCommit(b, fixedSeed, 1_000) + benchmarkNondeterministicFastCommit(b, fixedSeed, 10_000) + benchmarkNondeterministicFastCommit(b, fixedSeed, 100_000) + benchmarkNondeterministicFastCommit(b, fixedSeed, 1_000_000) +} + +func benchmarkRetrieve(b *testing.B, seed int64, numberOfSlabs int) { + + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + encodedSlabs := make(map[SlabID][]byte) + ids := make([]SlabID, 0, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + slab := generateLargeSlab(id) + + data, err := EncodeSlab(slab, encMode) + require.NoError(b, err) + + encodedSlabs[id] = data + ids = append(ids, id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + b.StartTimer() + + for _, id := range ids { + _, found, err := storage.Retrieve(id) + require.True(b, found) + require.NoError(b, err) + } + } + }) +} + +func benchmarkBatchPreload(b *testing.B, seed int64, numberOfSlabs int) { + + r := rand.New(rand.NewSource(seed)) + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(b, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(b, err) + + encodedSlabs := make(map[SlabID][]byte) + ids := make([]SlabID, 0, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + addr := generateRandomAddress(r) + + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := SlabID{addr, index} + + 
slab := generateLargeSlab(id) + + data, err := EncodeSlab(slab, encMode) + require.NoError(b, err) + + encodedSlabs[id] = data + ids = append(ids, id) + } + + b.Run(strconv.Itoa(numberOfSlabs), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + b.StartTimer() + + err = storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(b, err) + + for _, id := range ids { + _, found, err := storage.Retrieve(id) + require.True(b, found) + require.NoError(b, err) + } + } + }) +} + +func BenchmarkStorageRetrieve(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. + + benchmarkRetrieve(b, fixedSeed, 10) + benchmarkRetrieve(b, fixedSeed, 100) + benchmarkRetrieve(b, fixedSeed, 1_000) + benchmarkRetrieve(b, fixedSeed, 10_000) + benchmarkRetrieve(b, fixedSeed, 100_000) + benchmarkRetrieve(b, fixedSeed, 1_000_000) +} + +func BenchmarkStorageBatchPreload(b *testing.B) { + fixedSeed := int64(1234567) // intentionally use fixed constant rather than time, etc. 
+ + benchmarkBatchPreload(b, fixedSeed, 10) + benchmarkBatchPreload(b, fixedSeed, 100) + benchmarkBatchPreload(b, fixedSeed, 1_000) + benchmarkBatchPreload(b, fixedSeed, 10_000) + benchmarkBatchPreload(b, fixedSeed, 100_000) + benchmarkBatchPreload(b, fixedSeed, 1_000_000) +} diff --git a/storage_test.go b/storage_test.go index 0b5a0f6..bba9461 100644 --- a/storage_test.go +++ b/storage_test.go @@ -19,6 +19,7 @@ package atree import ( + "encoding/binary" "errors" "math/rand" "runtime" @@ -412,8 +413,8 @@ func TestBasicSlabStorageStore(t *testing.T) { r := newRand(t) address := Address{1} slabs := map[SlabID]Slab{ - {address, SlabIndex{1}}: generateRandomSlab(address, r), - {address, SlabIndex{2}}: generateRandomSlab(address, r), + {address, SlabIndex{1}}: generateRandomSlab(SlabID{address, SlabIndex{1}}, r), + {address, SlabIndex{2}}: generateRandomSlab(SlabID{address, SlabIndex{2}}, r), } // Store values @@ -424,7 +425,7 @@ func TestBasicSlabStorageStore(t *testing.T) { // Overwrite stored values for id := range slabs { - slab := generateRandomSlab(id.address, r) + slab := generateRandomSlab(id, r) slabs[id] = slab err := storage.Store(id, slab) require.NoError(t, err) @@ -446,7 +447,7 @@ func TestBasicSlabStorageRetrieve(t *testing.T) { r := newRand(t) id := SlabID{Address{1}, SlabIndex{1}} - slab := generateRandomSlab(id.address, r) + slab := generateRandomSlab(id, r) // Retrieve value from empty storage retrievedSlab, found, err := storage.Retrieve(id) @@ -476,7 +477,7 @@ func TestBasicSlabStorageRemove(t *testing.T) { r := newRand(t) id := SlabID{Address{1}, SlabIndex{1}} - slab := generateRandomSlab(id.address, r) + slab := generateRandomSlab(id, r) // Remove value from empty storage err := storage.Remove(id) @@ -546,7 +547,7 @@ func TestBasicSlabStorageSlabIDs(t *testing.T) { // Store values for id := range wantIDs { - err := storage.Store(id, generateRandomSlab(id.address, r)) + err := storage.Store(id, generateRandomSlab(id, r)) require.NoError(t, err) } 
@@ -569,9 +570,9 @@ func TestBasicSlabStorageSlabIterat(t *testing.T) { id3 := SlabID{address: address, index: index.Next()} want := map[SlabID]Slab{ - id1: generateRandomSlab(id1.address, r), - id2: generateRandomSlab(id2.address, r), - id3: generateRandomSlab(id3.address, r), + id1: generateRandomSlab(id1, r), + id2: generateRandomSlab(id2, r), + id3: generateRandomSlab(id3, r), } storage := NewBasicSlabStorage(nil, nil, nil, nil) @@ -642,8 +643,8 @@ func TestPersistentStorage(t *testing.T) { permSlabID, err := NewSlabIDFromRawBytes([]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}) require.NoError(t, err) - slab1 := generateRandomSlab(tempSlabID.address, r) - slab2 := generateRandomSlab(permSlabID.address, r) + slab1 := generateRandomSlab(tempSlabID, r) + slab2 := generateRandomSlab(permSlabID, r) // no temp ids should be in the base storage err = storage.Store(tempSlabID, slab1) @@ -724,8 +725,10 @@ func TestPersistentStorage(t *testing.T) { numberOfSlabsPerAccount := 10 r := newRand(t) + baseStorage := newAccessOrderTrackerBaseStorage() storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + baseStorage2 := newAccessOrderTrackerBaseStorage() storageWithFastCommit := NewPersistentSlabStorage(baseStorage2, encMode, decMode, nil, nil) @@ -735,21 +738,24 @@ func TestPersistentStorage(t *testing.T) { for i := 0; i < numberOfAccounts; i++ { for j := 0; j < numberOfSlabsPerAccount; j++ { addr := generateRandomAddress(r) - slab := generateRandomSlab(addr, r) - slabSize += uint64(slab.ByteSize()) slabID, err := storage.GenerateSlabID(addr) require.NoError(t, err) + + slab := generateRandomSlab(slabID, r) + slabSize += uint64(slab.ByteSize()) + err = storage.Store(slabID, slab) require.NoError(t, err) slabID2, err := storageWithFastCommit.GenerateSlabID(addr) require.NoError(t, err) + err = storageWithFastCommit.Store(slabID2, slab) require.NoError(t, err) // capture data for accuracy testing - simpleMap[slabID], err = Encode(slab, encMode) + 
simpleMap[slabID], err = EncodeSlab(slab, encMode) require.NoError(t, err) } } @@ -900,7 +906,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) { data := map[SlabID][]byte{ // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] id1: { - // extra data // version 0x10, // extra data flag @@ -970,7 +975,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) { // (data slab) next: 0, data: [0] id4: { - // extra data // version 0x10, // extra data flag @@ -1002,7 +1006,7 @@ func TestPersistentStorageSlabIterator(t *testing.T) { break } - encodedSlab, err := Encode(slab, storage.cborEncMode) + encodedSlab, err := EncodeSlab(slab, storage.cborEncMode) require.NoError(t, err) require.Equal(t, encodedSlab, data[id]) @@ -1044,12 +1048,12 @@ func TestPersistentStorageGenerateSlabID(t *testing.T) { }) } -func generateRandomSlab(address Address, r *rand.Rand) Slab { +func generateRandomSlab(id SlabID, r *rand.Rand) Slab { storable := Uint64Value(r.Uint64()) return &ArrayDataSlab{ header: ArraySlabHeader{ - slabID: NewSlabID(address, SlabIndex{1}), + slabID: id, size: arrayRootDataSlabPrefixSize + storable.ByteSize(), count: 1, }, @@ -1057,6 +1061,28 @@ func generateRandomSlab(address Address, r *rand.Rand) Slab { } } +func generateLargeSlab(id SlabID) Slab { + + const elementCount = 100 + + storables := make([]Storable, elementCount) + size := uint32(0) + for i := 0; i < elementCount; i++ { + storable := Uint64Value(uint64(i)) + size += storable.ByteSize() + storables[i] = storable + } + + return &ArrayDataSlab{ + header: ArraySlabHeader{ + slabID: id, + size: arrayRootDataSlabPrefixSize + size, + count: elementCount, + }, + elements: storables, + } +} + func generateRandomAddress(r *rand.Rand) Address { address := Address{} r.Read(address[:]) @@ -1242,3 +1268,3786 @@ func (s slowStorable) Encode(encoder *Encoder) error { runtime.KeepAlive(n) return s.Uint8Value.Encode(encoder) } + +func TestFixLoadedBrokenReferences(t *testing.T) { + address 
:= Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("healthy", func(t *testing.T) { + + // Create a health storage with arrays and maps + mapMetaDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + mapDataNonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + mapDataNonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + nestedArrayID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + emptyMapDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 5}} + + mapDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 6}} + + emptyArrayDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 7}} + + arrayDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 8}} + + arrayMetaDataRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 9}} + arrayDataNonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} + arrayDataNonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + nestedArrayID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 12}} + + rootIDs := []SlabID{ + mapMetaDataRootID, + emptyMapDataRootID, + mapDataRootID, + emptyArrayDataRootID, + arrayDataRootID, + arrayMetaDataRootID, + } + + data := map[SlabID][]byte{ + // root map metadata slab + // metadata slab + mapMetaDataRootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + mapDataNonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 
+ 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + mapDataNonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // array data slab + nestedArrayID: { + // extra data + // version + 0x00, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 elements) + 0x81, + // type info + 0x18, 0x2b, + + // version + 0x00, + // flag: root + array data + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + + // empty map + emptyMapDataRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + + // root map data slab + mapDataRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // 
extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + }, + + // empty array + emptyArrayDataRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x00, + }, + + // root array data slab + arrayDataRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + + // root array metadata slab + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + arrayMetaDataRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child 
header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + arrayDataNonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] + arrayDataNonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, + }, + + // (data slab) next: 0, data: [0] + nestedArrayID2: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // 
type info + 0x18, 0x2b, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + rootIDSet, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, len(rootIDs), len(rootIDSet)) + + for _, rootID := range rootIDs { + _, found := rootIDSet[rootID] + require.True(t, found) + } + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, 0, len(skippedRootIDs)) + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Fix broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, 0, len(skippedRootIDs)) + + // No data is modified during fixing broken reference + require.Equal(t, 0, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDSet, err = CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, len(rootIDs), len(rootIDSet)) + + }) + + t.Run("broken root map data slab", func(t *testing.T) { + + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + brokenRefs := map[SlabID][]SlabID{ + rootID: {rootID}, + } + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data 
(CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [SlabID(0x0.1):uint64(0)] + 0x82, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0xd8, 0xa4, 0x00, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, 
skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Fix broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + + t.Run("broken nested storable in root map data slab", func(t *testing.T) { + + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + brokenRefs := map[SlabID][]SlabID{ + rootID: {rootID}, + } + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // 
seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):SomeValue(SlabID(0x0.1))] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, cborTagSomeValue, 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool 
{ + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Fix broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + + t.Run("broken non-root map data slab", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + brokenRefs := map[SlabID][]SlabID{ + rootID: {nonRootDataID2}, + } + + // Expected serialized slab data with storage id + data := map[SlabID][]byte{ + + // metadata slab + rootID: { + // extra data + // 
version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 
0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootDataID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 
0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + 
require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Fix broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, 1, len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 3, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + + t.Run("multiple data slabs with broken reference in the same map", func(t 
*testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + brokenRefs := map[SlabID][]SlabID{ + rootID: {nonRootDataID1, nonRootDataID2}, + } + + data := map[SlabID][]byte{ + + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + + // data slab + nonRootDataID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // 
hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + } + + fixedData := map[SlabID][]byte{ + rootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded 
data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Fix broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 3, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[rootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, 
err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 1, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID], savedData) + }) + + t.Run("broken reference in nested container", func(t *testing.T) { + parentContainerRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + nestedContainerRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + brokenRefs := map[SlabID][]SlabID{ + nestedContainerRootID: {nestedContainerRootID}, + } + + data := map[SlabID][]byte{ + + // metadata slab + parentContainerRootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 
elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootDataID2: { 
+ // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 
0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // map data slab + nestedContainerRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + // map data slab + nestedContainerRootID: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := 
storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab (0x0.1) not found: slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Fix broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return true + }) + require.NoError(t, err) + require.Equal(t, len(brokenRefs), len(fixedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, fixedRootIDs[rootID]) + } + + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing broken reference + rootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, 1, len(rootIDs)) + + _, ok := rootIDs[parentContainerRootID] + require.True(t, ok) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 4, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(nestedContainerRootID) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[nestedContainerRootID], savedData) + }) + + 
t.Run("selectively fix maps", func(t *testing.T) { + rootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootDataID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootDataID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} // containing broken ref + + rootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} // containing broken ref + + rootIDs := []SlabID{rootID1, rootID2} + + brokenRefs := map[SlabID][]SlabID{ + rootID1: {nonRootDataID2}, + rootID2: {rootID2}, + } + + data := map[SlabID][]byte{ + // metadata slab + rootID1: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootDataID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootDataID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys 
(byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + + // map data slab + rootID2: { + // extra data + // version + 0x00, + // flag: root + map data + 
0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x4a, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):SlabID(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + }, + } + + fixedData := map[SlabID][]byte{ + rootID1: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + + rootID2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x4a, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded 
as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x00, + }, + } + + storage := newTestPersistentStorageWithData(t, data) + + // Load data in storage + for id := range data { + _, found, err := storage.Retrieve(id) + require.NoError(t, err) + require.True(t, found) + } + + // Check health before fixing broken reference + _, err := CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab not found during slab iteration") + + var fixedRootIDs map[SlabID][]SlabID + var skippedRootIDs map[SlabID][]SlabID + + // Don't fix any broken references + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(_ Value) bool { + return false + }) + require.NoError(t, err) + require.Equal(t, 0, len(fixedRootIDs)) + require.Equal(t, len(brokenRefs), len(skippedRootIDs)) + + for rootID, slabIDsWithBrokenRef := range brokenRefs { + require.ElementsMatch(t, slabIDsWithBrokenRef, skippedRootIDs[rootID]) + } + + // No data is modified because no fix happened + require.Equal(t, 0, len(storage.deltas)) + + // Only fix one map with broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(v Value) bool { + m, ok := v.(*OrderedMap) + require.True(t, ok) + return rootID1 == m.SlabID() + }) + require.NoError(t, err) + require.Equal(t, 1, len(fixedRootIDs)) + require.Equal(t, brokenRefs[rootID1], fixedRootIDs[rootID1]) + require.Equal(t, 1, len(skippedRootIDs)) + require.Equal(t, brokenRefs[rootID2], skippedRootIDs[rootID2]) + require.Equal(t, 3, len(storage.deltas)) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check health after only fixing one map with broken reference + _, err = CheckStorageHealth(storage, -1) + require.ErrorContains(t, err, "slab not found during slab iteration") + + // Fix remaining map with broken reference + fixedRootIDs, skippedRootIDs, err = storage.FixLoadedBrokenReferences(func(v Value) bool { 
+ return true + }) + require.NoError(t, err) + require.Equal(t, 1, len(fixedRootIDs)) + require.Equal(t, brokenRefs[rootID2], fixedRootIDs[rootID2]) + require.Equal(t, 0, len(skippedRootIDs)) + require.Equal(t, 1, len(storage.deltas)) + + // Check health after fixing remaining maps with broken reference + returnedRootIDs, err := CheckStorageHealth(storage, -1) + require.NoError(t, err) + require.Equal(t, len(rootIDs), len(returnedRootIDs)) + + // Save data in storage + err = storage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, 0, len(storage.deltas)) + + // Check encoded data + baseStorage := storage.baseStorage.(*InMemBaseStorage) + require.Equal(t, 2, len(baseStorage.segments)) + + savedData, found, err := baseStorage.Retrieve(rootID1) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID1], savedData) + + savedData, found, err = baseStorage.Retrieve(rootID2) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, fixedData[rootID2], savedData) + }) +} + +func TestGetAllChildReferencesFromArray(t *testing.T) { + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab without refs", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra 
data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with ref to nested element", func(t *testing.T) { + parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + parentRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with ref in nested storable", func(t *testing.T) { + parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + parentRootID: { + // extra 
data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, cborTagSomeValue, 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with broken ref", func(t *testing.T) { + parentRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{childRootID} + + data := map[SlabID][]byte{ + parentRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 
0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []SlabID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + nonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 
0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 
0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to first data slab", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []SlabID{nonRootID2} + expectedBrokenRefIDs := []SlabID{nonRootID1} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with ref", func(t 
*testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + expectedRefIDs := []SlabID{nonRootID1, nonRootID2, childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + nonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + 
// array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to nested element", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + expectedRefIDs := []SlabID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []SlabID{childRootID} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // extra data flag + 0x81, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array meta data slab flag + 0x81, + // child header count + 0x00, 0x02, + // child header 1 (storage id, count, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0x00, 0x00, 0xe4, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] + nonRootID1: { + // version + 0x00, + // array data slab flag + 0x00, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] + nonRootID2: { + // version + 0x00, + // array data slab flag + 0x40, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("3-level of nested containers", func(t *testing.T) { + parentRootID 
:= SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + gchildRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []SlabID{childRootID, gchildRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + parentRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + gchildRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, parentRootID, expectedRefIDs, expectedBrokenRefIDs) + }) +} + +func TestGetAllChildReferencesFromMap(t *testing.T) { + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra 
data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab without refs", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with ref", func(t 
*testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with ref in nested storable", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := 
map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, cborTagSomeValue, 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root data slab with broken ref", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedRefIDs := []SlabID{} + expectedBrokenRefIDs := []SlabID{childRootID} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 
0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []SlabID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 
0x00, 0x00, 0xfe, + }, + + // data slab + nonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 
0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 
0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to first data slab", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []SlabID{nonRootID2} + expectedBrokenRefIDs := []SlabID{nonRootID1} + + data := map[SlabID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 
0xfe, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 
0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with ref", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + expectedRefIDs := []SlabID{nonRootID1, nonRootID2, childRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 
0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: 
[hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + childRootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 0 + 0x00, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 0 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("root metadata slab with broken ref to nested element", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + nonRootID1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + nonRootID2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + expectedRefIDs := []SlabID{nonRootID1, nonRootID2} + expectedBrokenRefIDs := []SlabID{childRootID} + + data := map[SlabID][]byte{ + // metadata slab + rootID: { + // extra data + // version + 0x00, + // flag: root + map meta + 0x89, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + 
meta + 0x89, + // child header count + 0x00, 0x02, + // child header 1 (storage id, first key, size) + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x02, + // child header 2 + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0xfe, + }, + + // data slab + nonRootID1: { + // version + 0x00, + // flag: map data + 0x08, + // next storage id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 
0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + nonRootID2: { + // version + 0x00, + // flag: has pointer + map data + 0x48, + // next storage id + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:hhhhhhhhhhhhhhhhhhhhhh] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) + + t.Run("3-level containers", func(t *testing.T) { + rootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + childRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + gchildRootID := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + expectedRefIDs := []SlabID{childRootID, gchildRootID} + expectedBrokenRefIDs := []SlabID{} + + data := map[SlabID][]byte{ + rootID: { + // extra data + // version + 0x00, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2a, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // version + 0x00, + // flag: root + map data + 0x88, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x5b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x9b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // element: [uint64(0):uint64(0)] + 0x82, + 0xd8, 0xa4, 0x00, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + childRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + gchildRootID: { + // extra data + // version + 0x00, + // extra data flag + 0x80, + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // version + 0x00, + // array data slab flag + 0x80, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x01, + // CBOR encoded array elements + 0xd8, 0xa4, 0x00, + }, + } + + testGetAllChildReferences(t, data, rootID, expectedRefIDs, expectedBrokenRefIDs) + }) +} + +func testGetAllChildReferences( + t *testing.T, + data map[SlabID][]byte, + rootID SlabID, + expectedRefIDs []SlabID, + expectedBrokenRefIDs []SlabID, +) { + storage := newTestPersistentStorageWithData(t, data) + + refs, brokenRefs, err := storage.GetAllChildReferences(rootID) + require.NoError(t, err) + + require.Equal(t, len(expectedRefIDs), len(refs)) + require.ElementsMatch(t, expectedRefIDs, refs) + + require.Equal(t, len(expectedBrokenRefIDs), len(brokenRefs)) + require.ElementsMatch(t, expectedBrokenRefIDs, brokenRefs) +} + +func TestStorageNondeterministicFastCommit(t *testing.T) { + t.Run("0 slabs", func(t *testing.T) { + numberOfAccounts := 0 + numberOfSlabsPerAccount := 0 + 
testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("1 slabs", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 1 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("10 slabs", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 10 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("100 slabs", func(t *testing.T) { + numberOfAccounts := 10 + numberOfSlabsPerAccount := 10 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("10_000 slabs", func(t *testing.T) { + numberOfAccounts := 10 + numberOfSlabsPerAccount := 1_000 + testStorageNondeterministicFastCommit(t, numberOfAccounts, numberOfSlabsPerAccount) + }) +} + +func testStorageNondeterministicFastCommit(t *testing.T, numberOfAccounts int, numberOfSlabsPerAccount int) { + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(t, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(t, err) + + r := newRand(t) + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, nil, nil) + + encodedSlabs := make(map[SlabID][]byte) + slabSize := uint64(0) + + // Storage slabs + for i := 0; i < numberOfAccounts; i++ { + + addr := generateRandomAddress(r) + + for j := 0; j < numberOfSlabsPerAccount; j++ { + + slabID, err := storage.GenerateSlabID(addr) + require.NoError(t, err) + + slab := generateRandomSlab(slabID, r) + slabSize += uint64(slab.ByteSize()) + + err = storage.Store(slabID, slab) + require.NoError(t, err) + + // capture data for accuracy testing + encodedSlabs[slabID], err = EncodeSlab(slab, encMode) + require.NoError(t, err) + } + } + + require.Equal(t, uint(len(encodedSlabs)), storage.DeltasWithoutTempAddresses()) + require.Equal(t, slabSize, storage.DeltasSizeWithoutTempAddresses()) + + // Commit 
deltas + err = storage.NondeterministicFastCommit(10) + require.NoError(t, err) + + require.Equal(t, uint(0), storage.DeltasWithoutTempAddresses()) + require.Equal(t, uint64(0), storage.DeltasSizeWithoutTempAddresses()) + require.Equal(t, len(encodedSlabs), storage.Count()) + + // Compare encoded data + for sid, value := range encodedSlabs { + storedValue, found, err := baseStorage.Retrieve(sid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, value, storedValue) + } + + // Remove all slabs from storage + for sid := range encodedSlabs { + err = storage.Remove(sid) + require.NoError(t, err) + require.Equal(t, uint64(0), storage.DeltasSizeWithoutTempAddresses()) + } + + // Commit deltas + err = storage.NondeterministicFastCommit(10) + require.NoError(t, err) + + require.Equal(t, 0, storage.Count()) + require.Equal(t, uint64(0), storage.DeltasSizeWithoutTempAddresses()) + + // Check remove functionality + for sid := range encodedSlabs { + storedValue, found, err := storage.Retrieve(sid) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, storedValue) + } +} + +func TestStorageBatchPreload(t *testing.T) { + t.Run("0 slab", func(t *testing.T) { + numberOfAccounts := 0 + numberOfSlabsPerAccount := 0 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("1 slab", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 1 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("10 slab", func(t *testing.T) { + numberOfAccounts := 1 + numberOfSlabsPerAccount := 10 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("100 slabs", func(t *testing.T) { + numberOfAccounts := 10 + numberOfSlabsPerAccount := 10 + testStorageBatchPreload(t, numberOfAccounts, numberOfSlabsPerAccount) + }) + + t.Run("10_000 slabs", func(t *testing.T) { + numberOfAccounts := 10 + numberOfSlabsPerAccount := 1_000 + testStorageBatchPreload(t, 
numberOfAccounts, numberOfSlabsPerAccount) + }) +} + +func testStorageBatchPreload(t *testing.T, numberOfAccounts int, numberOfSlabsPerAccount int) { + + indexesByAddress := make(map[Address]uint64) + + generateSlabID := func(address Address) SlabID { + nextIndex := indexesByAddress[address] + 1 + + var idx SlabIndex + binary.BigEndian.PutUint64(idx[:], nextIndex) + + indexesByAddress[address] = nextIndex + + return NewSlabID(address, idx) + } + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(t, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(t, err) + + r := newRand(t) + + encodedSlabs := make(map[SlabID][]byte) + + // Generate and encode slabs + for i := 0; i < numberOfAccounts; i++ { + + addr := generateRandomAddress(r) + + for j := 0; j < numberOfSlabsPerAccount; j++ { + + slabID := generateSlabID(addr) + + slab := generateRandomSlab(slabID, r) + + encodedSlabs[slabID], err = EncodeSlab(slab, encMode) + require.NoError(t, err) + } + } + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + ids := make([]SlabID, 0, len(encodedSlabs)) + for id := range encodedSlabs { + ids = append(ids, id) + } + + // Batch preload slabs from base storage + err = storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(t, err) + require.Equal(t, len(encodedSlabs), len(storage.cache)) + require.Equal(t, 0, len(storage.deltas)) + + // Compare encoded data + for id, data := range encodedSlabs { + cachedData, err := EncodeSlab(storage.cache[id], encMode) + require.NoError(t, err) + + require.Equal(t, cachedData, data) + } +} + +func TestStorageBatchPreloadNotFoundSlabs(t *testing.T) { + + encMode, err := cbor.EncOptions{}.EncMode() + require.NoError(t, err) + + decMode, err := cbor.DecOptions{}.DecMode() + require.NoError(t, err) + + r := newRand(t) + + t.Run("empty storage", func(t *testing.T) { + const numberOfSlabs = 10 + + ids := 
make([]SlabID, numberOfSlabs) + for i := 0; i < numberOfSlabs; i++ { + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + ids[i] = NewSlabID(generateRandomAddress(r), index) + } + + baseStorage := NewInMemBaseStorage() + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + err := storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, 0, len(storage.cache)) + require.Equal(t, 0, len(storage.deltas)) + }) + + t.Run("non-empty storage", func(t *testing.T) { + const numberOfSlabs = 10 + + ids := make([]SlabID, numberOfSlabs) + encodedSlabs := make(map[SlabID][]byte) + + for i := 0; i < numberOfSlabs; i++ { + var index SlabIndex + binary.BigEndian.PutUint64(index[:], uint64(i)) + + id := NewSlabID(generateRandomAddress(r), index) + + slab := generateRandomSlab(id, r) + + encodedSlabs[id], err = EncodeSlab(slab, encMode) + require.NoError(t, err) + + ids[i] = id + } + + // Append a slab ID that doesn't exist in storage. 
+ ids = append(ids, NewSlabID(generateRandomAddress(r), SlabIndex{numberOfSlabs})) + + baseStorage := NewInMemBaseStorageFromMap(encodedSlabs) + storage := NewPersistentSlabStorage(baseStorage, encMode, decMode, decodeStorable, decodeTypeInfo) + + err := storage.BatchPreload(ids, runtime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, len(encodedSlabs), len(storage.cache)) + require.Equal(t, 0, len(storage.deltas)) + + // Compare encoded data + for id, data := range encodedSlabs { + cachedData, err := EncodeSlab(storage.cache[id], encMode) + require.NoError(t, err) + + require.Equal(t, cachedData, data) + } + }) +} diff --git a/typeinfo.go b/typeinfo.go index b2b3c08..ef7f218 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -19,11 +19,20 @@ package atree import ( + "bytes" + "encoding/binary" + "fmt" + "sort" + "strings" + "sync" + "github.com/fxamacker/cbor/v2" ) type TypeInfo interface { Encode(*cbor.StreamEncoder) error + IsComposite() bool + Copy() TypeInfo } type TypeInfoDecoder func( @@ -32,3 +41,642 @@ type TypeInfoDecoder func( TypeInfo, error, ) + +// encodeTypeInfo encodes TypeInfo either: +// - as is (for TypeInfo in root slab extra data section), or +// - as index of inlined TypeInfos (for TypeInfo in inlined slab extra data section) +type encodeTypeInfo func(*Encoder, TypeInfo) error + +// defaultEncodeTypeInfo encodes TypeInfo as is. 
+func defaultEncodeTypeInfo(enc *Encoder, typeInfo TypeInfo) error { + return typeInfo.Encode(enc.CBOR) +} + +func decodeTypeInfoRefIfNeeded(inlinedTypeInfo []TypeInfo, defaultTypeInfoDecoder TypeInfoDecoder) TypeInfoDecoder { + if len(inlinedTypeInfo) == 0 { + return defaultTypeInfoDecoder + } + + return func(decoder *cbor.StreamDecoder) (TypeInfo, error) { + rawTypeInfo, err := decoder.DecodeRawBytes() + if err != nil { + return nil, NewDecodingError(fmt.Errorf("failed to decode raw type info: %w", err)) + } + + if len(rawTypeInfo) > len(typeInfoRefTagHeadAndTagNumber) && + bytes.Equal( + rawTypeInfo[:len(typeInfoRefTagHeadAndTagNumber)], + typeInfoRefTagHeadAndTagNumber) { + + // Type info is encoded as type info ref. + + var index uint64 + + err = cbor.Unmarshal(rawTypeInfo[len(typeInfoRefTagHeadAndTagNumber):], &index) + if err != nil { + return nil, NewDecodingError(err) + } + + if index >= uint64(len(inlinedTypeInfo)) { + return nil, NewDecodingError(fmt.Errorf("failed to decode type info ref: expect index < %d, got %d", len(inlinedTypeInfo), index)) + } + + return inlinedTypeInfo[int(index)], nil + } + + // Decode type info as is. + + dec := cbor.NewByteStreamDecoder(rawTypeInfo) + + return defaultTypeInfoDecoder(dec) + } +} + +type ExtraData interface { + isExtraData() bool + Type() TypeInfo + Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error +} + +// compactMapExtraData is used for inlining compact values. +// compactMapExtraData includes hkeys and keys with map extra data +// because hkeys and keys are the same in order and content for +// all values with the same compact type and map seed. 
+type compactMapExtraData struct { + mapExtraData *MapExtraData + hkeys []Digest // hkeys is ordered by mapExtraData.Seed + keys []ComparableStorable // keys is ordered by mapExtraData.Seed +} + +var _ ExtraData = &compactMapExtraData{} + +const compactMapExtraDataLength = 3 + +func (c *compactMapExtraData) isExtraData() bool { + return true +} + +func (c *compactMapExtraData) Type() TypeInfo { + return c.mapExtraData.TypeInfo +} + +func (c *compactMapExtraData) Encode(enc *Encoder, encodeTypeInfo encodeTypeInfo) error { + err := enc.CBOR.EncodeArrayHead(compactMapExtraDataLength) + if err != nil { + return NewEncodingError(err) + } + + // element 0: map extra data + err = c.mapExtraData.Encode(enc, encodeTypeInfo) + if err != nil { + // err is already categorized by MapExtraData.Encode(). + return err + } + + // element 1: digests + totalDigestSize := len(c.hkeys) * digestSize + + var digests []byte + if totalDigestSize <= len(enc.Scratch) { + digests = enc.Scratch[:totalDigestSize] + } else { + digests = make([]byte, totalDigestSize) + } + + for i := 0; i < len(c.hkeys); i++ { + binary.BigEndian.PutUint64(digests[i*digestSize:], uint64(c.hkeys[i])) + } + + err = enc.CBOR.EncodeBytes(digests) + if err != nil { + return NewEncodingError(err) + } + + // element 2: field names + err = enc.CBOR.EncodeArrayHead(uint64(len(c.keys))) + if err != nil { + return NewEncodingError(err) + } + + for _, key := range c.keys { + err = key.Encode(enc) + if err != nil { + // Wrap err as external error (if needed) because err is returned by ComparableStorable.Encode(). 
+ return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode key's storable") + } + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func newCompactMapExtraData( + dec *cbor.StreamDecoder, + decodeTypeInfo TypeInfoDecoder, + decodeStorable StorableDecoder, +) (*compactMapExtraData, error) { + + length, err := dec.DecodeArrayHead() + if err != nil { + return nil, NewDecodingError(err) + } + + if length != compactMapExtraDataLength { + return nil, NewDecodingError( + fmt.Errorf( + "compact extra data has invalid length %d, want %d", + length, + compactMapExtraDataLength, + )) + } + + // element 0: map extra data + mapExtraData, err := newMapExtraData(dec, decodeTypeInfo) + if err != nil { + // err is already categorized by newMapExtraData(). + return nil, err + } + + // element 1: digests + digestBytes, err := dec.DecodeBytes() + if err != nil { + return nil, NewDecodingError(err) + } + + if len(digestBytes)%digestSize != 0 { + return nil, NewDecodingError( + fmt.Errorf( + "decoding digests failed: number of bytes %d is not multiple of %d", + len(digestBytes), + digestSize)) + } + + digestCount := len(digestBytes) / digestSize + + // element 2: keys + keyCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, NewDecodingError(err) + } + + if keyCount != uint64(digestCount) { + return nil, NewDecodingError( + fmt.Errorf( + "decoding compact map key failed: number of keys %d is different from number of digests %d", + keyCount, + digestCount)) + } + + hkeys := make([]Digest, digestCount) + for i := 0; i < digestCount; i++ { + hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:])) + } + + keys := make([]ComparableStorable, keyCount) + for i := uint64(0); i < keyCount; i++ { + // Decode compact map key + key, err := decodeStorable(dec, SlabIDUndefined, nil) + if err != nil { + // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") + } + compactMapKey, ok := key.(ComparableStorable) + if !ok { + return nil, NewDecodingError(fmt.Errorf("failed to decode key's storable: got %T, expect ComparableStorable", key)) + } + keys[i] = compactMapKey + } + + return &compactMapExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil +} + +type compactMapTypeInfo struct { + index int + keys []ComparableStorable +} + +type extraDataAndEncodedTypeInfo struct { + extraData ExtraData + encodedTypeInfo string // cached encoded type info +} + +type InlinedExtraData struct { + extraData []extraDataAndEncodedTypeInfo // Used to encode deduplicated ExtraData in order + compactMapTypeSet map[string]compactMapTypeInfo // Used to deduplicate compactMapExtraData by encoded TypeInfo + sorted field names + arrayExtraDataSet map[string]int // Used to deduplicate arrayExtraData by encoded TypeInfo +} + +func newInlinedExtraData() *InlinedExtraData { + // Maps used for deduplication are initialized lazily. + return &InlinedExtraData{} +} + +const inlinedExtraDataArrayCount = 2 + +var typeInfoRefTagHeadAndTagNumber = []byte{0xd8, CBORTagTypeInfoRef} + +// Encode encodes inlined extra data as 2-element array: +// +// +-----------------------+------------------------+ +// | [+ inlined type info] | [+ inlined extra data] | +// +-----------------------+------------------------+ +func (ied *InlinedExtraData) Encode(enc *Encoder) error { + + typeInfos, typeInfoIndexes := ied.findDuplicateTypeInfo() + + var err error + + err = enc.CBOR.EncodeArrayHead(inlinedExtraDataArrayCount) + if err != nil { + return NewEncodingError(err) + } + + // element 0: array of duplicate type info + err = enc.CBOR.EncodeArrayHead(uint64(len(typeInfos))) + if err != nil { + return NewEncodingError(err) + } + + // Encode type info + for _, typeInfo := range typeInfos { + // Encode cached type info as is. 
+ err = enc.CBOR.EncodeRawBytes([]byte(typeInfo)) + if err != nil { + return NewEncodingError(err) + } + } + + // element 1: deduplicated array of extra data + err = enc.CBOR.EncodeArrayHead(uint64(len(ied.extraData))) + if err != nil { + return NewEncodingError(err) + } + + // Encode inlined extra data + for _, extraDataInfo := range ied.extraData { + var tagNum uint64 + + switch extraDataInfo.extraData.(type) { + case *ArrayExtraData: + tagNum = CBORTagInlinedArrayExtraData + + case *MapExtraData: + tagNum = CBORTagInlinedMapExtraData + + case *compactMapExtraData: + tagNum = CBORTagInlinedCompactMapExtraData + + default: + return NewEncodingError(fmt.Errorf("failed to encode unsupported extra data type %T", extraDataInfo.extraData)) + } + + err = enc.CBOR.EncodeTagHead(tagNum) + if err != nil { + return NewEncodingError(err) + } + + err = extraDataInfo.extraData.Encode(enc, func(enc *Encoder, _ TypeInfo) error { + encodedTypeInfo := extraDataInfo.encodedTypeInfo + + index, exist := typeInfoIndexes[encodedTypeInfo] + if !exist { + // typeInfo is not encoded separately, so encode typeInfo as is here. + err = enc.CBOR.EncodeRawBytes([]byte(encodedTypeInfo)) + if err != nil { + return NewEncodingError(err) + } + return nil + } + + err = enc.CBOR.EncodeRawBytes(typeInfoRefTagHeadAndTagNumber) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.EncodeUint64(uint64(index)) + if err != nil { + return NewEncodingError(err) + } + + return nil + }) + if err != nil { + // err is already categorized by ExtraData.Encode(). 
+ return err + } + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func (ied *InlinedExtraData) findDuplicateTypeInfo() ([]string, map[string]int) { + if len(ied.extraData) < 2 { + // No duplicate type info + return nil, nil + } + + // Make a copy of encoded type info to sort + encodedTypeInfo := make([]string, len(ied.extraData)) + for i, info := range ied.extraData { + encodedTypeInfo[i] = info.encodedTypeInfo + } + + sort.Strings(encodedTypeInfo) + + // Find duplicate type info + var duplicateTypeInfo []string + var duplicateTypeInfoIndexes map[string]int + + for currentIndex := 1; currentIndex < len(encodedTypeInfo); { + + if encodedTypeInfo[currentIndex-1] != encodedTypeInfo[currentIndex] { + currentIndex++ + continue + } + + // Found duplicate type info at currentIndex + duplicate := encodedTypeInfo[currentIndex] + + // Insert duplicate into duplicate type info list and map + duplicateTypeInfo = append(duplicateTypeInfo, duplicate) + + if duplicateTypeInfoIndexes == nil { + duplicateTypeInfoIndexes = make(map[string]int) + } + duplicateTypeInfoIndexes[duplicate] = len(duplicateTypeInfo) - 1 + + // Skip same duplicate from sorted list + currentIndex++ + for currentIndex < len(encodedTypeInfo) && encodedTypeInfo[currentIndex] == duplicate { + currentIndex++ + } + } + + return duplicateTypeInfo, duplicateTypeInfoIndexes +} + +func newInlinedExtraDataFromData( + data []byte, + decMode cbor.DecMode, + decodeStorable StorableDecoder, + defaultDecodeTypeInfo TypeInfoDecoder, +) ([]ExtraData, []byte, error) { + + dec := decMode.NewByteStreamDecoder(data) + + count, err := dec.DecodeArrayHead() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + if count != inlinedExtraDataArrayCount { + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect %d elements, got %d elements", inlinedExtraDataArrayCount, count)) + } + + // element 0: array of duplicate type info + 
typeInfoCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + inlinedTypeInfo := make([]TypeInfo, int(typeInfoCount)) + for i := uint64(0); i < typeInfoCount; i++ { + inlinedTypeInfo[i], err = defaultDecodeTypeInfo(dec) + if err != nil { + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode typeInfo") + } + } + + decodeTypeInfo := decodeTypeInfoRefIfNeeded(inlinedTypeInfo, defaultDecodeTypeInfo) + + // element 1: array of deduplicated extra data info + extraDataCount, err := dec.DecodeArrayHead() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + if extraDataCount == 0 { + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect at least one inlined extra data")) + } + + inlinedExtraData := make([]ExtraData, extraDataCount) + for i := uint64(0); i < extraDataCount; i++ { + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + switch tagNum { + case CBORTagInlinedArrayExtraData: + inlinedExtraData[i], err = newArrayExtraData(dec, decodeTypeInfo) + if err != nil { + // err is already categorized by newArrayExtraData(). + return nil, nil, err + } + + case CBORTagInlinedMapExtraData: + inlinedExtraData[i], err = newMapExtraData(dec, decodeTypeInfo) + if err != nil { + // err is already categorized by newMapExtraData(). + return nil, nil, err + } + + case CBORTagInlinedCompactMapExtraData: + inlinedExtraData[i], err = newCompactMapExtraData(dec, decodeTypeInfo, decodeStorable) + if err != nil { + // err is already categorized by newCompactMapExtraData(). + return nil, nil, err + } + + default: + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: unsupported tag number %d", tagNum)) + } + } + + return inlinedExtraData, data[dec.NumBytesDecoded():], nil +} + +// addArrayExtraData returns index of deduplicated array extra data. 
+// Array extra data is deduplicated by array type info ID because array +// extra data only contains type info. +func (ied *InlinedExtraData) addArrayExtraData(data *ArrayExtraData) (int, error) { + encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) + if err != nil { + // err is already categorized by getEncodedTypeInfo(). + return 0, err + } + + if ied.arrayExtraDataSet == nil { + ied.arrayExtraDataSet = make(map[string]int) + } + + index, exist := ied.arrayExtraDataSet[encodedTypeInfo] + if exist { + return index, nil + } + + index = len(ied.extraData) + ied.extraData = append(ied.extraData, extraDataAndEncodedTypeInfo{data, encodedTypeInfo}) + ied.arrayExtraDataSet[encodedTypeInfo] = index + + return index, nil +} + +// addMapExtraData returns index of map extra data. +// Map extra data is not deduplicated because it also contains count and seed. +func (ied *InlinedExtraData) addMapExtraData(data *MapExtraData) (int, error) { + encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) + if err != nil { + // err is already categorized by getEncodedTypeInfo(). + return 0, err + } + + index := len(ied.extraData) + ied.extraData = append(ied.extraData, extraDataAndEncodedTypeInfo{data, encodedTypeInfo}) + return index, nil +} + +// addCompactMapExtraData returns index of deduplicated compact map extra data. +// Compact map extra data is deduplicated by encoded TypeInfo with sorted field names. +func (ied *InlinedExtraData) addCompactMapExtraData( + data *MapExtraData, + digests []Digest, + keys []ComparableStorable, +) (int, []ComparableStorable, error) { + + encodedTypeInfo, err := getEncodedTypeInfo(data.TypeInfo) + if err != nil { + // err is already categorized by getEncodedTypeInfo(). 
+ return 0, nil, err + } + + if ied.compactMapTypeSet == nil { + ied.compactMapTypeSet = make(map[string]compactMapTypeInfo) + } + + compactMapTypeID := makeCompactMapTypeID(encodedTypeInfo, keys) + info, exist := ied.compactMapTypeSet[compactMapTypeID] + if exist { + return info.index, info.keys, nil + } + + compactMapData := &compactMapExtraData{ + mapExtraData: data, + hkeys: digests, + keys: keys, + } + + index := len(ied.extraData) + ied.extraData = append(ied.extraData, extraDataAndEncodedTypeInfo{compactMapData, encodedTypeInfo}) + + ied.compactMapTypeSet[compactMapTypeID] = compactMapTypeInfo{ + keys: keys, + index: index, + } + + return index, keys, nil +} + +func (ied *InlinedExtraData) empty() bool { + return len(ied.extraData) == 0 +} + +// makeCompactMapTypeID returns id of concatenated encoded type info with sorted names with "," as separator. +func makeCompactMapTypeID(encodedTypeInfo string, names []ComparableStorable) string { + const separator = "," + + if len(names) == 1 { + return encodedTypeInfo + separator + names[0].ID() + } + + sorter := newFieldNameSorter(names) + + sort.Sort(sorter) + + return encodedTypeInfo + separator + sorter.join(separator) +} + +// fieldNameSorter sorts names by index (not in place sort). 
+type fieldNameSorter struct { + names []ComparableStorable + index []int +} + +func newFieldNameSorter(names []ComparableStorable) *fieldNameSorter { + index := make([]int, len(names)) + for i := 0; i < len(names); i++ { + index[i] = i + } + return &fieldNameSorter{ + names: names, + index: index, + } +} + +func (fn *fieldNameSorter) Len() int { + return len(fn.names) +} + +func (fn *fieldNameSorter) Less(i, j int) bool { + i = fn.index[i] + j = fn.index[j] + return fn.names[i].Less(fn.names[j]) +} + +func (fn *fieldNameSorter) Swap(i, j int) { + fn.index[i], fn.index[j] = fn.index[j], fn.index[i] +} + +func (fn *fieldNameSorter) join(sep string) string { + var sb strings.Builder + for i, index := range fn.index { + if i > 0 { + sb.WriteString(sep) + } + sb.WriteString(fn.names[index].ID()) + } + return sb.String() +} + +func getEncodedTypeInfo(ti TypeInfo) (string, error) { + b := getTypeIDBuffer() + defer putTypeIDBuffer(b) + + enc := cbor.NewStreamEncoder(b) + err := ti.Encode(enc) + if err != nil { + // Wrap err as external error (if needed) because err is returned by TypeInfo.Encode(). 
+ return "", wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode type info") + } + enc.Flush() + + return b.String(), nil +} + +const defaultTypeIDBufferSize = 256 + +var typeIDBufferPool = sync.Pool{ + New: func() interface{} { + e := new(bytes.Buffer) + e.Grow(defaultTypeIDBufferSize) + return e + }, +} + +func getTypeIDBuffer() *bytes.Buffer { + return typeIDBufferPool.Get().(*bytes.Buffer) +} + +func putTypeIDBuffer(e *bytes.Buffer) { + e.Reset() + typeIDBufferPool.Put(e) +} diff --git a/utils_test.go b/utils_test.go index 55f2df7..1e5b980 100644 --- a/utils_test.go +++ b/utils_test.go @@ -20,6 +20,7 @@ package atree import ( "flag" + "fmt" "math/rand" "testing" "time" @@ -91,6 +92,18 @@ type testTypeInfo struct { var _ TypeInfo = testTypeInfo{} +func (i testTypeInfo) Copy() TypeInfo { + return i +} + +func (i testTypeInfo) IsComposite() bool { + return false +} + +func (i testTypeInfo) Identifier() string { + return fmt.Sprintf("uint64(%d)", i) +} + func (i testTypeInfo) Encode(enc *cbor.StreamEncoder) error { return enc.EncodeUint64(i.value) } @@ -100,13 +113,50 @@ func (i testTypeInfo) Equal(other TypeInfo) bool { return ok && i.value == otherTestTypeInfo.value } +const testCompositeTypeInfoTagNum = 246 + +type testCompositeTypeInfo struct { + value uint64 +} + +var _ TypeInfo = testCompositeTypeInfo{} + +func (i testCompositeTypeInfo) Copy() TypeInfo { + return i +} + +func (i testCompositeTypeInfo) IsComposite() bool { + return true +} + +func (i testCompositeTypeInfo) Identifier() string { + return fmt.Sprintf("composite(%d)", i) +} + +func (i testCompositeTypeInfo) Encode(enc *cbor.StreamEncoder) error { + err := enc.EncodeTagHead(testCompositeTypeInfoTagNum) + if err != nil { + return err + } + return enc.EncodeUint64(i.value) +} + +func (i testCompositeTypeInfo) Equal(other TypeInfo) bool { + otherTestTypeInfo, ok := other.(testCompositeTypeInfo) + return ok && i.value == otherTestTypeInfo.value +} + func typeInfoComparator(a, b TypeInfo) bool { - 
x, ok := a.(testTypeInfo) - if !ok { + switch a := a.(type) { + case testTypeInfo: + return a.Equal(b) + + case testCompositeTypeInfo: + return a.Equal(b) + + default: return false } - y, ok := b.(testTypeInfo) - return ok && x.value == y.value } func newTestPersistentStorage(t testing.TB) *PersistentSlabStorage { @@ -150,6 +200,15 @@ func newTestPersistentStorageWithBaseStorage(t testing.TB, baseStorage BaseStora ) } +func newTestPersistentStorageWithBaseStorageAndDeltas(t testing.TB, baseStorage BaseStorage, data map[SlabID][]byte) *PersistentSlabStorage { + storage := newTestPersistentStorageWithBaseStorage(t, baseStorage) + for id, b := range data { + err := storage.baseStorage.Store(id, b) + require.NoError(t, err) + } + return storage +} + func newTestBasicStorage(t testing.TB) *BasicSlabStorage { encMode, err := cbor.EncOptions{}.EncMode() require.NoError(t, err) @@ -264,80 +323,118 @@ func (s *InMemBaseStorage) ResetReporter() { s.segmentsTouched = make(map[SlabID]struct{}) } -func valueEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) { - switch a.(type) { +func valueEqual(t *testing.T, expected Value, actual Value) { + switch expected := expected.(type) { + case arrayValue: + actual, ok := actual.(*Array) + require.True(t, ok) + + arrayEqual(t, expected, actual) + case *Array: - arrayEqual(t, tic, a, b) + require.FailNow(t, "expected value shouldn't be *Array") + + case mapValue: + actual, ok := actual.(*OrderedMap) + require.True(t, ok) + + mapEqual(t, expected, actual) + case *OrderedMap: - mapEqual(t, tic, a, b) + require.FailNow(t, "expected value shouldn't be *OrderedMap") + default: - require.Equal(t, a, b) + require.Equal(t, expected, actual) } } -func arrayEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) { - array1, ok := a.(*Array) - require.True(t, ok) +func arrayEqual(t *testing.T, expected arrayValue, actual *Array) { + require.Equal(t, uint64(len(expected)), actual.Count()) - array2, ok := b.(*Array) - require.True(t, 
ok) + iterator, err := actual.ReadOnlyIterator() + require.NoError(t, err) - require.True(t, tic(array1.Type(), array2.Type())) - require.Equal(t, array1.Address(), array2.Address()) - require.Equal(t, array1.Count(), array2.Count()) - require.Equal(t, array1.SlabID(), array2.SlabID()) + i := 0 + for { + actualValue, err := iterator.Next() + require.NoError(t, err) - iterator1, err := array1.Iterator() - require.NoError(t, err) + if actualValue == nil { + break + } + + valueEqual(t, expected[i], actualValue) + i++ + } + require.Equal(t, len(expected), i) +} + +func mapEqual(t *testing.T, expected mapValue, actual *OrderedMap) { + require.Equal(t, uint64(len(expected)), actual.Count()) - iterator2, err := array2.Iterator() + iterator, err := actual.ReadOnlyIterator() require.NoError(t, err) + i := 0 for { - value1, err := iterator1.Next() - require.NoError(t, err) - - value2, err := iterator2.Next() + actualKey, actualValue, err := iterator.Next() require.NoError(t, err) - valueEqual(t, tic, value1, value2) - - if value1 == nil || value2 == nil { + if actualKey == nil { break } + + expectedValue, exist := expected[actualKey] + require.True(t, exist) + + valueEqual(t, expectedValue, actualValue) + i++ } + require.Equal(t, len(expected), i) } -func mapEqual(t *testing.T, tic TypeInfoComparator, a Value, b Value) { - m1, ok := a.(*OrderedMap) - require.True(t, ok) +func valueIDToSlabID(vid ValueID) SlabID { + var id SlabID + copy(id.address[:], vid[:slabAddressSize]) + copy(id.index[:], vid[slabAddressSize:]) + return id +} - m2, ok := b.(*OrderedMap) - require.True(t, ok) +func testInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) { + testInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID()) +} - require.True(t, tic(m1.Type(), m2.Type())) - require.Equal(t, m1.Address(), m2.Address()) - require.Equal(t, m1.Count(), m2.Count()) - require.Equal(t, m1.SlabID(), m2.SlabID()) +func testNotInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) { + 
testNotInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID()) +} - iterator1, err := m1.Iterator() - require.NoError(t, err) +func testInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) { + require.Equal(t, SlabIDUndefined, slabID) - iterator2, err := m2.Iterator() - require.NoError(t, err) + require.Equal(t, expectedAddress[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) +} - for { - key1, value1, err := iterator1.Next() - require.NoError(t, err) +func testNotInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) { + require.Equal(t, expectedAddress, slabID.address) + require.NotEqual(t, SlabIndexUndefined, slabID.index) - key2, value2, err := iterator2.Next() - require.NoError(t, err) + require.Equal(t, slabID.address[:], valueID[:slabAddressSize]) + require.Equal(t, slabID.index[:], valueID[slabAddressSize:]) +} - valueEqual(t, tic, key1, key2) - valueEqual(t, tic, value1, value2) +type arrayValue []Value - if key1 == nil || key2 == nil { - break - } - } +var _ Value = &arrayValue{} + +func (v arrayValue) Storable(SlabStorage, Address, uint64) (Storable, error) { + panic("not reachable") +} + +type mapValue map[Value]Value + +var _ Value = &mapValue{} + +func (v mapValue) Storable(SlabStorage, Address, uint64) (Storable, error) { + panic("not reachable") } diff --git a/value.go b/value.go index 71ba2f9..0652d1a 100644 --- a/value.go +++ b/value.go @@ -25,3 +25,14 @@ type Value interface { type ValueComparator func(SlabStorage, Value, Storable) (bool, error) type StorableComparator func(Storable, Storable) bool + +type parentUpdater func() (found bool, err error) + +// mutableValueNotifier is an interface that allows mutable child value to notify and update parent. +type mutableValueNotifier interface { + Value + ValueID() ValueID + setParentUpdater(parentUpdater) + Inlined() bool + Inlinable(uint64) bool +}