From d6f3daaf306d298f4267faa917201146b5352b62 Mon Sep 17 00:00:00 2001
From: Faye Amacker <33205765+fxamacker@users.noreply.github.com>
Date: Thu, 14 Sep 2023 11:47:32 -0500
Subject: [PATCH] Inline child array/map data slab into parent slab

Currently, every array or map is stored in its own slab, and a parent
slab refers to a child array or map by SlabID.  This approach can lead
to many small slabs, especially for Cadence data structures with
multiple levels of nesting.

This commit inlines a child array/map in its parent slab when:
- the child array/map fits in one slab (its root slab is a data slab)
- the encoded size of the inlined child array/map does not exceed the
  max inline size limit enforced by the parent

This commit optimizes encoding size by:
- reusing the type info of inlined arrays
- reusing the seed, digests, and field names of inlined composite types

Also update debugging code to handle inlined array/map elements.
---
 array.go               |   657 ++-
 array_debug.go         |   236 +-
 array_test.go          |  4959 ++++++++++++-----
 basicarray.go          |    15 +-
 cmd/main/main.go       |    10 +-
 cmd/stress/storable.go |     2 +-
 cmd/stress/typeinfo.go |    10 +
 encode.go              |    33 +-
 map.go                 |   885 ++-
 map_debug.go           |   103 +-
 map_test.go            | 11229 +++++++++++++++++++++++++++++----------
 storable.go            |    17 +
 storable_test.go       |    69 +-
 storage.go             |    20 +
 storage_test.go        |     2 -
 typeinfo.go            |   343 ++
 utils_test.go          |   133 +-
 value.go               |     9 +
 18 files changed, 14336 insertions(+), 4396 deletions(-)

diff --git a/array.go b/array.go
index f661e9c4..03d35617 100644
--- a/array.go
+++ b/array.go
@@ -19,6 +19,7 @@ package atree
 
 import (
+	"bytes"
 	"encoding/binary"
 	"fmt"
 	"math"
@@ -57,6 +58,15 @@ const (
 
 	// 32 is faster than 24 and 40.
 	linearScanThreshold = 32
+
+	// inlined array data slab prefix size:
+	// tag number (2 bytes) +
+	// 3-element array head (1 byte) +
+	// extra data ref index (2 bytes) [0, 255] +
+	// value ID index head (1 byte) +
+	// value ID index (8 bytes) +
+	// element array head (3 bytes)
+	inlinedArrayDataSlabPrefixSize = 2 + 1 + 2 + 1 + 8 + arrayDataSlabElementHeadSize
 )
 
 type ArraySlabHeader struct {
@@ -69,6 +79,8 @@ type ArrayExtraData struct {
 	TypeInfo TypeInfo // array type
 }
 
+var _ ExtraData = &ArrayExtraData{}
+
 // ArrayDataSlab is leaf node, implementing ArraySlab.
 type ArrayDataSlab struct {
 	next SlabID
@@ -78,6 +90,10 @@ type ArrayDataSlab struct {
 	// extraData is data that is prepended to encoded slab data.
 	// It isn't included in slab size calculation for splitting and merging.
 	extraData *ArrayExtraData
+
+	// inlined indicates whether this slab is stored inlined in its parent slab.
+	// This flag affects Encode(), ByteSize(), etc.
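+	// An inlined slab is not stored under its own SlabID in SlabStorage;
+	// it is encoded as part of its parent slab's data.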
+ inlined bool } func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { @@ -85,8 +101,9 @@ func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) { return nil, NewNotValueError(a.SlabID()) } return &Array{ - Storage: storage, - root: a, + Storage: storage, + root: a, + mutableElementIndex: make(map[ValueID]uint64), }, nil } @@ -113,8 +130,9 @@ func (a *ArrayMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) { return nil, NewNotValueError(a.SlabID()) } return &Array{ - Storage: storage, - root: a, + Storage: storage, + root: a, + mutableElementIndex: make(map[ValueID]uint64), }, nil } @@ -142,24 +160,35 @@ type ArraySlab interface { SetExtraData(*ArrayExtraData) PopIterate(SlabStorage, ArrayPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool } // Array is tree type Array struct { Storage SlabStorage root ArraySlab + + // parentUpdater is a callback that notifies parent container when this array is modified. + // If this callback is null, this array has no parent. Otherwise, this array has parent + // and this callback must be used when this array is changed by Append, Insert, Set, + // Remove, etc. + parentUpdater parentUpdater + + // mutableElementIndex tracks index of mutable element, such as Array and OrderedMap. + // This is needed by mutable element to properly update itself through parentUpdater. + // TODO: maybe optimize by replacing map to get faster updates. + mutableElementIndex map[ValueID]uint64 } var _ Value = &Array{} +var _ valueNotifier = &Array{} func (a *Array) Address() Address { return a.root.SlabID().address } -func (a *Array) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) { - return SlabIDStorable(a.SlabID()), nil -} - const arrayExtraDataLength = 1 func newArrayExtraDataFromData( @@ -208,6 +237,10 @@ func newArrayExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) return &ArrayExtraData{TypeInfo: typeInfo}, nil } +func (a *ArrayExtraData) isExtraData() bool { + return true +} + // Encode encodes extra data as CBOR array: // // [type info] @@ -353,25 +386,26 @@ func newArrayDataSlabFromDataV0( return nil, NewDecodingError(err) } + // Compute slab size for version 1. + slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable - } - - // Compute slab size for version 1. 
- slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize + slabSize += storable.ByteSize() } header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -387,21 +421,22 @@ func newArrayDataSlabFromDataV0( // // Root DataSlab Header: // -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ +// +-------------------------------+------------+---------------------------------+ +// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | +// +-------------------------------+------------+---------------------------------+ // -// Non-root DataSlab Header (18 bytes): +// Non-root DataSlab Header: // -// +-------------------------------+-----------------------------+ -// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) | -// +-------------------------------+-----------------------------+ +// +-------------------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded array of elements // // See ArrayExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. func newArrayDataSlabFromDataV1( id SlabID, h head, @@ -415,6 +450,7 @@ func newArrayDataSlabFromDataV1( ) { var err error var extraData *ArrayExtraData + var inlinedExtraData []ExtraData var next SlabID // Decode extra data @@ -426,6 +462,20 @@ func newArrayDataSlabFromDataV1( } } + // Decode inlined slab extra data + if h.hasInlinedSlabs() { + inlinedExtraData, data, err = newInlinedExtraDataFromData( + data, + decMode, + decodeStorable, + decodeTypeInfo, + ) + if err != nil { + // err is categorized already by newInlinedExtraDataFromData. + return nil, err + } + } + // Decode next slab ID if h.hasNextSlabID() { next, err = NewSlabIDFromRawBytes(data) @@ -450,14 +500,20 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingError(err) } + slabSize := uint32(arrayDataSlabPrefixSize) + if h.isRoot() { + slabSize = arrayRootDataSlabPrefixSize + } + elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") } elements[i] = storable + slabSize += storable.ByteSize() } // Check if data reached EOF @@ -465,15 +521,9 @@ func newArrayDataSlabFromDataV1( return nil, NewDecodingErrorf("data has %d bytes of extraneous data for array data slab", len(data)-cborDec.NumBytesDecoded()) } - // Compute slab size for version 1. - slabSize := versionAndFlagSize + cborDec.NumBytesDecoded() - if !h.isRoot() { - slabSize += slabIDSize - } - header := ArraySlabHeader{ slabID: id, - size: uint32(slabSize), + size: slabSize, count: uint32(elemCount), } @@ -482,30 +532,234 @@ func newArrayDataSlabFromDataV1( header: header, elements: elements, extraData: extraData, + inlined: false, // this function is only called when slab is not inlined. 
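+		// (Inlined array data slabs are decoded by DecodeInlinedArrayStorable instead.)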
 	}, nil
 }
 
+// DecodeInlinedArrayStorable decodes inlined array data slab. Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedArray, and tag content
+// as 3-element array:
+//
+//   - index of inlined extra data
+//   - value ID index
+//   - CBOR array of elements
+//
+// NOTE: This function doesn't decode tag number because tag number is decoded
+// in the caller and decoder only contains tag content.
+func DecodeInlinedArrayStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedArrayDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedArrayDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect %d elements, got %d elements",
+				inlinedArrayDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+
+	extraData, ok := inlinedExtraData[extraDataIndex].(*ArrayExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect *ArrayExtraData, got %T",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined array data slab: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index [8]byte
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, index)
+
+	// Decode array elements (CBOR array)
+	elemCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	size := uint32(inlinedArrayDataSlabPrefixSize)
+
+	elements := make([]Storable, elemCount)
+	for i := 0; i < int(elemCount); i++ {
+		storable, err := decodeStorable(dec, slabID, inlinedExtraData)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by StorableDecoder callback.
+			return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element")
+		}
+		elements[i] = storable
+
+		size += storable.ByteSize()
+	}
+
+	header := ArraySlabHeader{
+		slabID: slabID,
+		size:   size,
+		count:  uint32(elemCount),
+	}
+
+	return &ArrayDataSlab{
+		header:    header,
+		elements:  elements,
+		extraData: extraData,
+		inlined:   true,
+	}, nil
+}
+
+// encodeAsInlined encodes inlined array data slab. Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedArray,
+// and tag content as 3-element array:
+//
+//   - index of inlined extra data
+//   - value ID index
+//   - CBOR array of elements
+func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+	if a.extraData == nil {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode non-root array data slab as inlined"))
+	}
+
+	if !a.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode standalone array data slab as inlined"))
+	}
+
+	extraDataIndex := inlinedTypeInfo.addArrayExtraData(a.extraData)
+
+	if extraDataIndex > 255 {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode inlined array data slab: extra data index %d exceeds limit 255", extraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedArray,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed sized CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(a.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: array elements
+	err = a.encodeElements(enc, inlinedTypeInfo)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
 // Encode encodes this array data slab to the given encoder.
 //
 // Root DataSlab Header:
 //
-//	+-------------------------------+------------+
-//	| slab version + flag (2 bytes) | extra data |
-//	+-------------------------------+------------+
+//	+-------------------------------+------------+---------------------------------+
+//	| slab version + flag (2 bytes) | extra data | inlined extra data (if present) |
+//	+-------------------------------+------------+---------------------------------+
 //
-// Non-root DataSlab Header (18 bytes):
+// Non-root DataSlab Header:
 //
-//	+-------------------------------+-----------------------------+
-//	| slab version + flag (2 bytes) | next sib slab ID (16 bytes) |
-//	+-------------------------------+-----------------------------+
+//	+-------------------------------+---------------------------------+-----------------------------+
+//	| slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) |
+//	+-------------------------------+---------------------------------+-----------------------------+
 //
 // Content:
 //
 //	CBOR encoded array of elements
 //
 // See ArrayExtraData.Encode() for extra data section format.
+// See InlinedExtraData.Encode() for inlined extra data section format.
 func (a *ArrayDataSlab) Encode(enc *Encoder) error {
+	if a.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode inlined array data slab as standalone slab"))
+	}
+
+	// Encoding is done in two steps:
+	//
+	// 1. Encode array elements using a new buffer while collecting inlined extra data from inlined elements.
+	// 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer.
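+	//
+	// Two passes are needed because the inlined extra data section is written
+	// before the elements in the encoded slab, but its contents are only known
+	// after all elements have been encoded.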
+
+	inlinedTypes := newInlinedExtraData()
+
+	// TODO: maybe use a buffer pool
+	var elementBuf bytes.Buffer
+	elementEnc := NewEncoder(&elementBuf, enc.encMode)
+
+	err := a.encodeElements(elementEnc, inlinedTypes)
+	if err != nil {
+		// err is already categorized by Array.encodeElements().
+		return err
+	}
+
+	err = elementEnc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
 	const version = 1
 
 	h, err := newArraySlabHead(version, slabArrayData)
@@ -525,15 +779,18 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 		h.setRoot()
 	}
 
+	if !inlinedTypes.empty() {
+		h.setHasInlinedSlabs()
+	}
+
 	// Encode head (version + flag)
 	_, err = enc.Write(h[:])
 	if err != nil {
 		return NewEncodingError(err)
 	}
 
-	// Encode header
+	// Encode extra data
 	if a.extraData != nil {
-		// Encode extra data
 		err = a.extraData.Encode(enc)
 		if err != nil {
 			// err is already categorized by ArrayExtraData.Encode().
@@ -541,6 +798,15 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 		}
 	}
 
+	// Encode inlined extra data
+	if !inlinedTypes.empty() {
+		err = inlinedTypes.Encode(enc)
+		if err != nil {
+			// err is already categorized by inlinedExtraData.Encode().
+			return err
+		}
+	}
+
 	// Encode next slab ID
 	if a.next != SlabIDUndefined {
 		n, err := a.next.ToRawBytes(enc.Scratch[:])
@@ -555,6 +821,21 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 		}
 	}
 
+	// Encode elements by copying raw bytes from previous buffer
+	err = enc.CBOR.EncodeRawBytes(elementBuf.Bytes())
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
 	// Encode CBOR array size manually for fix-sized encoding
 
 	enc.Scratch[0] = 0x80 | 25
@@ -568,14 +849,14 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 
 	// Write scratch content to encoder
 	totalSize := countOffset + countSize
-	_, err = enc.Write(enc.Scratch[:totalSize])
+	err := enc.CBOR.EncodeRawBytes(enc.Scratch[:totalSize])
 	if err != nil {
 		return NewEncodingError(err)
 	}
 
 	// Encode data slab content (array of elements)
 	for _, e := range a.elements {
-		err = e.Encode(enc)
+		err = encodeStorableAsElement(enc, e, inlinedTypeInfo)
 		if err != nil {
 			// Wrap err as external error (if needed) because err is returned by Storable interface.
 			return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode array element")
@@ -590,6 +871,35 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (a *ArrayDataSlab) Inlined() bool {
+	return a.inlined
+}
+
+// Inlinable returns true if
+//   - array data slab is root slab
+//   - size of inlined array data slab <= maxInlineSize
+func (a *ArrayDataSlab) Inlinable(maxInlineSize uint64) bool {
+	if a.extraData == nil {
+		// Non-root data slab is not inlinable.
+		return false
+	}
+
+	// At this point, this data slab is either
+	// - inlined data slab, or
+	// - not inlined root data slab
+
+	// Compute inlined size from cached slab size
+	inlinedSize := a.header.size
+	if !a.inlined {
+		inlinedSize = inlinedSize -
+			arrayRootDataSlabPrefixSize +
+			inlinedArrayDataSlabPrefixSize
+	}
+
+	// Inlined byte size must be less than or equal to max inline size.
+ return uint64(inlinedSize) <= maxInlineSize +} + func (a *ArrayDataSlab) hasPointer() bool { for _, e := range a.elements { if hasPointer(e) { @@ -606,6 +916,9 @@ func (a *ArrayDataSlab) ChildStorables() []Storable { } func (a *ArrayDataSlab) getPrefixSize() uint32 { + if a.inlined { + return inlinedArrayDataSlabPrefixSize + } if a.extraData != nil { return arrayRootDataSlabPrefixSize } @@ -644,10 +957,12 @@ func (a *ArrayDataSlab) Set(storage SlabStorage, address Address, index uint64, a.header.size = size - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return oldElem, nil @@ -675,10 +990,12 @@ func (a *ArrayDataSlab) Insert(storage SlabStorage, address Address, index uint6 a.header.count++ a.header.size += storable.ByteSize() - err = storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return nil @@ -705,10 +1022,12 @@ func (a *ArrayDataSlab) Remove(storage SlabStorage, index uint64) (Storable, err a.header.count-- a.header.size -= v.ByteSize() - err := storage.Store(a.header.slabID, a) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + if !a.inlined { + err := storage.Store(a.header.slabID, a) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.header.slabID)) + } } return v, nil @@ -2201,7 +2520,15 @@ func (a *ArrayMetaDataSlab) CanLendToRight(size uint32) bool { return a.header.size-arraySlabHeaderSize*n > uint32(minThreshold) } -func (a ArrayMetaDataSlab) IsData() bool { +func (a *ArrayMetaDataSlab) Inlined() bool { + return false +} + +func (a *ArrayMetaDataSlab) Inlinable(_ uint64) bool { + return false +} + +func (a *ArrayMetaDataSlab) IsData() bool { return false } @@ -2314,8 +2641,9 @@ func NewArray(storage SlabStorage, address Address, typeInfo TypeInfo) (*Array, } return &Array{ - Storage: storage, - root: root, + Storage: storage, + root: root, + mutableElementIndex: make(map[ValueID]uint64), }, nil } @@ -2336,22 +2664,98 @@ func NewArrayWithRootID(storage SlabStorage, rootID SlabID) (*Array, error) { } return &Array{ - Storage: storage, - root: root, + Storage: storage, + root: root, + mutableElementIndex: make(map[ValueID]uint64), }, nil } +// TODO: maybe optimize this +func (a *Array) incrementIndexFrom(index uint64) { + for id, i := range a.mutableElementIndex { + if i >= index { + a.mutableElementIndex[id]++ + } + } +} + +// TODO: maybe optimize this +func (a *Array) decrementIndexFrom(index uint64) { + for id, i := range a.mutableElementIndex { + if i > index { + a.mutableElementIndex[id]-- + } + } +} + +func (a *Array) getIndexByValueID(id ValueID) (uint64, bool) { + index, exist := a.mutableElementIndex[id] + return index, exist +} + +func (a *Array) setParentUpdater(f parentUpdater) { + a.parentUpdater = f +} + +// setCallbackWithChild sets up callback function with child value so +// parent array a can be notified when child value is modified. +func (a *Array) setCallbackWithChild(i uint64, child Value) { + c, ok := child.(valueNotifier) + if !ok { + return + } + + vid := c.ValueID() + + // Index i will be updated with array operations, which affects element index. + a.mutableElementIndex[vid] = i + + c.setParentUpdater(func() error { + + // Get latest index by child value ID. + index, exist := a.getIndexByValueID(vid) + if !exist { + return NewFatalError(fmt.Errorf("failed to get index for child element with value id %s", vid)) + } + + // Set child value with parent array using updated index. + // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := a.Set(index, c) + if err != nil { + return err + } + + if existingValueStorable == nil { + return NewFatalError(fmt.Errorf("failed to reset child value in parent updater callback because previous value is nil")) + } + + return nil + }) +} + +// notifyParentIfNeeded calls parent updater if this array is a child value. +func (a *Array) notifyParentIfNeeded() error { + if a.parentUpdater == nil { + return nil + } + return a.parentUpdater() +} + func (a *Array) Get(i uint64) (Value, error) { storable, err := a.root.Get(a.Storage, i) if err != nil { // Don't need to wrap error as external error because err is already categorized by ArraySlab.Get(). return nil, err } + v, err := storable.StoredValue(a.Storage) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + + a.setCallbackWithChild(i, v) + return v, nil } @@ -2382,6 +2786,11 @@ func (a *Array) Set(index uint64, value Value) (Storable, error) { } } + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + return existingStorable, nil } @@ -2402,7 +2811,9 @@ func (a *Array) Insert(index uint64, value Value) error { return a.splitRoot() } - return nil + a.incrementIndexFrom(index) + + return a.notifyParentIfNeeded() } func (a *Array) Remove(index uint64) (Storable, error) { @@ -2424,6 +2835,13 @@ func (a *Array) Remove(index uint64) (Storable, error) { } } + a.decrementIndexFrom(index) + + err = a.notifyParentIfNeeded() + if err != nil { + return nil, err + } + return storable, nil } @@ -2534,6 +2952,89 @@ func (a *Array) promoteChildAsNewRoot(childID SlabID) error { return nil } +func (a *Array) Inlined() bool { + return a.root.Inlined() +} + +// Storable returns array a as either: +// - SlabIDStorable, or +// - inlined data slab storable +func (a *Array) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) { + + inlined := a.root.Inlined() + inlinable := a.root.Inlinable(maxInlineSize) + + if inlinable && inlined { + // Root slab is inlinable and was inlined. + // Return root slab as storable, no size adjustment and change to storage. + return a.root, nil + } + + if !inlinable && !inlined { + // Root slab is not inlinable and was not inlined. + // Return root slab ID as storable, no size adjustment and change to storage. + return SlabIDStorable(a.SlabID()), nil + } + + if inlinable && !inlined { + // Root slab is inlinable and was NOT inlined. + + // Inline root data slab. + + // Inlineable root slab must be data slab. + rootDataSlab, ok := a.root.(*ArrayDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlinable array slab type %T", a.root)) + } + + rootID := rootDataSlab.header.slabID + + // Remove root slab from storage because it is going to be inlined. + err := a.Storage.Remove(rootID) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", rootID)) + } + + // Update root data slab size as inlined slab. + rootDataSlab.header.size = rootDataSlab.header.size - + arrayRootDataSlabPrefixSize + + inlinedArrayDataSlabPrefixSize + + // Update root data slab inlined status. + rootDataSlab.inlined = true + + return rootDataSlab, nil + } + + // here, root slab is NOT inlinable and was previously inlined. + + // Un-inline root slab. + + // Inlined root slab must be data slab. + rootDataSlab, ok := a.root.(*ArrayDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlined array slab type %T", a.root)) + } + + // Update root data slab size + rootDataSlab.header.size = rootDataSlab.header.size - + inlinedArrayDataSlabPrefixSize + + arrayRootDataSlabPrefixSize + + // Update root data slab inlined status. + rootDataSlab.inlined = false + + // Store root slab in storage + err := a.Storage.Store(rootDataSlab.header.slabID, a.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.SlabID())) + } + + return SlabIDStorable(a.SlabID()), nil +} + var emptyArrayIterator = &ArrayIterator{} type ArrayIterator struct { @@ -2716,17 +3217,14 @@ func (a *Array) Count() uint64 { } func (a *Array) SlabID() SlabID { + if a.root.Inlined() { + return SlabIDUndefined + } return a.root.SlabID() } func (a *Array) ValueID() ValueID { - sid := a.SlabID() - - var id ValueID - copy(id[:], sid.address[:]) - copy(id[8:], sid.index[:]) - - return id + return slabIDToValueID(a.root.SlabID()) } func (a *Array) Type() TypeInfo { @@ -2831,20 +3329,30 @@ func (a *Array) PopIterate(fn ArrayPopIterationFunc) error { extraData := a.root.ExtraData() + inlined := a.root.Inlined() + + size := uint32(arrayRootDataSlabPrefixSize) + if inlined { + size = inlinedArrayDataSlabPrefixSize + } + // Set root to empty data slab a.root = &ArrayDataSlab{ header: ArraySlabHeader{ slabID: rootID, - size: arrayRootDataSlabPrefixSize, + size: size, }, extraData: extraData, + inlined: inlined, } // Save root slab - err = a.Storage.Store(a.root.SlabID(), a.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + if !a.Inlined() { + err = a.Storage.Store(a.root.SlabID(), a.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", a.root.SlabID())) + } } return nil @@ -3007,8 +3515,9 @@ func NewArrayFromBatchData(storage SlabStorage, address Address, typeInfo TypeIn } return &Array{ - Storage: storage, - root: root, + Storage: storage, + root: root, + mutableElementIndex: make(map[ValueID]uint64), }, nil } diff --git a/array_debug.go b/array_debug.go index 64cf0a07..9c18cbb1 100644 --- a/array_debug.go +++ b/array_debug.go @@ -66,12 +66,9 @@ func GetArrayStats(a *Array) (ArrayStats, error) { if slab.IsData() { dataSlabCount++ - childStorables := slab.ChildStorables() - for _, s := range childStorables { - if _, ok := s.(SlabIDStorable); ok { - storableSlabCount++ - } - } + ids := getSlabIDFromStorable(slab, nil) + storableSlabCount += uint64(len(ids)) + } else { metaDataSlabCount++ @@ -134,12 +131,7 @@ func DumpArraySlabs(a *Array) ([]string, error) { dataSlab := slab.(*ArrayDataSlab) dumps = append(dumps, fmt.Sprintf("level %d, %s", level+1, dataSlab)) - childStorables := dataSlab.ChildStorables() - for _, e := range childStorables { - if id, ok := e.(SlabIDStorable); ok { - overflowIDs = append(overflowIDs, SlabID(id)) - } - } + overflowIDs = getSlabIDFromStorable(dataSlab, overflowIDs) } else { meta := slab.(*ArrayMetaDataSlab) @@ -193,7 +185,7 @@ func ValidArray(a *Array, typeInfo TypeInfo, tic TypeInfoComparator, hip HashInp } computedCount, dataSlabIDs, nextDataSlabIDs, err := - validArraySlab(tic, hip, a.Storage, a.root.Header().slabID, 0, nil, []SlabID{}, []SlabID{}) + validArraySlab(tic, hip, a.Storage, a.root, 0, nil, []SlabID{}, []SlabID{}) if err != nil { // Don't need to wrap error as external error because err is already categorized by validArraySlab(). 
return err @@ -217,7 +209,7 @@ func validArraySlab( tic TypeInfoComparator, hip HashInputProvider, storage SlabStorage, - id SlabID, + slab ArraySlab, level int, headerFromParentSlab *ArraySlabHeader, dataSlabIDs []SlabID, @@ -229,34 +221,30 @@ func validArraySlab( err error, ) { - slab, err := getArraySlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getArraySlab(). - return 0, nil, nil, err - } + id := slab.Header().slabID if level > 0 { // Verify that non-root slab doesn't have extra data if slab.ExtraData() != nil { - return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %d has extra data", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s has extra data", id)) } // Verify that non-root slab doesn't underflow if underflowSize, underflow := slab.IsUnderflow(); underflow { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d underflows by %d bytes", id, underflowSize)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s underflows by %d bytes", id, underflowSize)) } } // Verify that slab doesn't overflow if slab.IsFull() { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d overflows", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s overflows", id)) } // Verify that header is in sync with header from parent slab if headerFromParentSlab != nil { if !reflect.DeepEqual(*headerFromParentSlab, slab.Header()) { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d header %+v is different from header %+v from parent slab", + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s header %+v is different from header %+v from parent slab", id, slab.Header(), headerFromParentSlab)) } } @@ -264,25 +252,34 @@ func validArraySlab( if slab.IsData() { dataSlab, ok := slab.(*ArrayDataSlab) if !ok { - return 0, nil, nil, NewFatalError(fmt.Errorf("slab %d is not ArrayDataSlab", id)) + return 0, nil, nil, NewFatalError(fmt.Errorf("slab %s is not ArrayDataSlab", id)) } // Verify that element count is the same as header.count if uint32(len(dataSlab.elements)) != dataSlab.header.count { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header count %d is wrong, want %d", + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header count %d is wrong, want %d", id, dataSlab.header.count, len(dataSlab.elements))) } + // Verify that only root slab can be inlined + if level > 0 && slab.Inlined() { + return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + // Verify that aggregated element size + slab prefix is the same as header.size computedSize := uint32(arrayDataSlabPrefixSize) if level == 0 { computedSize = uint32(arrayRootDataSlabPrefixSize) + if slab.Inlined() { + computedSize = uint32(inlinedArrayDataSlabPrefixSize) + } } + for _, e := range dataSlab.elements { // Verify element size is <= inline size if e.ByteSize() > uint32(maxInlineArrayElementSize) { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d element %s size %d is too large, want < %d", + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s element %s size %d is too large, want < %d", id, e, e.ByteSize(), maxInlineArrayElementSize)) } @@ -290,7 +287,7 @@ func validArraySlab( } if computedSize != dataSlab.header.size { - return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %d header size %d is wrong, want %d", + return 0, nil, nil, NewFatalError(fmt.Errorf("data slab %s header size %d is wrong, want %d", id, dataSlab.header.size, computedSize)) } @@ -315,7 
+312,7 @@ func validArraySlab( if err != nil { // Don't need to wrap error as external error because err is already categorized by ValidValue(). return 0, nil, nil, fmt.Errorf( - "data slab %d element %s isn't valid: %w", + "data slab %s element %q isn't valid: %w", id, e, err, ) } @@ -351,10 +348,16 @@ func validArraySlab( for i := 0; i < len(meta.childrenHeaders); i++ { h := meta.childrenHeaders[i] + childSlab, err := getArraySlab(storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getArraySlab(). + return 0, nil, nil, err + } + // Verify child slabs var count uint32 count, dataSlabIDs, nextDataSlabIDs, err = - validArraySlab(tic, hip, storage, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs) + validArraySlab(tic, hip, storage, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs) if err != nil { // Don't need to wrap error as external error because err is already categorized by validArraySlab(). return 0, nil, nil, err @@ -446,15 +449,30 @@ func validArraySlabSerialization( } // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined composite because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // composite extra data section for reuse, and only composite field + // values are encoded in non-extra data section. + // This reduces encoding size because composite values of the same + // composite type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined composite by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). 
+ return err + } - if slab.Header().size != uint32(encodedSlabSize) { - return NewFatalError(fmt.Errorf("slab %d encoded size %d != header.size %d", - id, encodedSlabSize, slab.Header().size)) + if slab.Header().size != uint32(encodedSlabSize) { + return NewFatalError(fmt.Errorf("slab %s encoded size %d != header.size %d", + id, encodedSlabSize, slab.Header().size)) + } } // Compare encoded data of original slab with encoded data of decoded slab @@ -548,6 +566,11 @@ func arrayDataSlabEqual( return err } + // Compare inlined + if expected.inlined != actual.inlined { + return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) + } + // Compare next if expected.next != actual.next { return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next)) @@ -567,14 +590,14 @@ func arrayDataSlabEqual( for i := 0; i < len(expected.elements); i++ { ee := expected.elements[i] ae := actual.elements[i] - if !compare(ee, ae) { - return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) - } - // Compare nested element - if idStorable, ok := ee.(SlabIDStorable); ok { + switch ee := ee.(type) { + case SlabIDStorable: + if !compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } - ev, err := idStorable.StoredValue(storage) + ev, err := ee.StoredValue(storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). return err @@ -588,6 +611,27 @@ func arrayDataSlabEqual( decodeTypeInfo, compare, ) + + case *ArrayDataSlab: + ae, ok := ae.(*ArrayDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", ae)) + } + + return arrayDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + + case *MapDataSlab: + ae, ok := ae.(*MapDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", ae)) + } + + return mapDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + + default: + if !compare(ee, ae) { + return NewFatalError(fmt.Errorf("element %d %+v is wrong, want %+v", i, ae, ee)) + } } } @@ -670,7 +714,7 @@ func ValidValueSerialization( return nil } -func computeSlabSize(data []byte) (int, error) { +func computeSize(data []byte) (int, error) { if len(data) < versionAndFlagSize { return 0, NewDecodingError(fmt.Errorf("data is too short")) } @@ -680,20 +724,23 @@ func computeSlabSize(data []byte) (int, error) { return 0, NewDecodingError(err) } - slabExtraDataSize, err := getExtraDataSize(h, data[versionAndFlagSize:]) + slabExtraDataSize, inlinedSlabExtrDataSize, err := getExtraDataSizes(h, data[versionAndFlagSize:]) if err != nil { return 0, err } - // Computed slab size (slab header size): - // - excludes slab extra data size - // - adds next slab ID for non-root data slab if not encoded - size := len(data) - slabExtraDataSize - isDataSlab := h.getSlabArrayType() == slabArrayData || h.getSlabMapType() == slabMapData || h.getSlabMapType() == slabMapCollisionGroup + // computed size (slab header size): + // - excludes slab extra data size + // - excludes inlined slab extra data size + // - adds next slab ID for non-root data slab if not encoded + size := len(data) + size -= slabExtraDataSize + size -= inlinedSlabExtrDataSize + if !h.isRoot() && isDataSlab && !h.hasNextSlabID() { size += slabIDSize } @@ -701,15 +748,102 @@ func 
computeSlabSize(data []byte) (int, error) { return size, nil } -func getExtraDataSize(h head, data []byte) (int, error) { +func hasInlinedComposite(data []byte) (bool, error) { + if len(data) < versionAndFlagSize { + return false, NewDecodingError(fmt.Errorf("data is too short")) + } + + h, err := newHeadFromData(data[:versionAndFlagSize]) + if err != nil { + return false, NewDecodingError(err) + } + + if !h.hasInlinedSlabs() { + return false, nil + } + + data = data[versionAndFlagSize:] + + // Skip slab extra data if needed. if h.isRoot() { dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) b, err := dec.DecodeRawBytes() if err != nil { - return 0, NewDecodingError(err) + return false, NewDecodingError(err) + } + + data = data[len(b):] + } + + // Parse inlined extra data to find composite extra data. + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + count, err := dec.DecodeArrayHead() + if err != nil { + return false, NewDecodingError(err) + } + + for i := uint64(0); i < count; i++ { + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return false, NewDecodingError(err) + } + if tagNum == CBORTagInlinedCompositeExtraData { + return true, nil + } + err = dec.Skip() + if err != nil { + return false, NewDecodingError(err) + } + } + + return false, nil +} + +func getExtraDataSizes(h head, data []byte) (int, int, error) { + + var slabExtraDataSize, inlinedSlabExtraDataSize int + + if h.isRoot() { + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + b, err := dec.DecodeRawBytes() + if err != nil { + return 0, 0, NewDecodingError(err) + } + slabExtraDataSize = len(b) + + data = data[slabExtraDataSize:] + } + + if h.hasInlinedSlabs() { + dec := cbor.NewStreamDecoder(bytes.NewBuffer(data)) + b, err := dec.DecodeRawBytes() + if err != nil { + return 0, 0, NewDecodingError(err) + } + inlinedSlabExtraDataSize = len(b) + } + + return slabExtraDataSize, inlinedSlabExtraDataSize, nil +} + +// getSlabIDFromStorable appends slab IDs from storable to ids. +// This function traverses child storables. If child storable +// is inlined map or array, inlined map or array is also traversed. +func getSlabIDFromStorable(storable Storable, ids []SlabID) []SlabID { + childStorables := storable.ChildStorables() + + for _, e := range childStorables { + switch e := e.(type) { + case SlabIDStorable: + ids = append(ids, SlabID(e)) + + case *ArrayDataSlab: + ids = getSlabIDFromStorable(e, ids) + + case *MapDataSlab: + ids = getSlabIDFromStorable(e, ids) } - return len(b), nil } - return 0, nil + return ids } diff --git a/array_test.go b/array_test.go index ad08ac9d..ed1f57d4 100644 --- a/array_test.go +++ b/array_test.go @@ -1374,7 +1374,7 @@ func TestArrayNestedArrayMap(t *testing.T) { storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - // Create a list of arrays with 2 elements. + // Create a list of arrays with 1 element. 
nestedArrays := make([]Value, arraySize) for i := uint64(0); i < arraySize; i++ { nested, err := NewArray(storage, address, nestedTypeInfo) @@ -1782,7 +1782,7 @@ func TestArrayEncodeDecode(t *testing.T) { verifyEmptyArray(t, storage2, typeInfo, address, array2) }) - t.Run("dataslab as root", func(t *testing.T) { + t.Run("root dataslab", func(t *testing.T) { typeInfo := testTypeInfo{42} storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1828,7 +1828,7 @@ func TestArrayEncodeDecode(t *testing.T) { verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("has pointers", func(t *testing.T) { + t.Run("root metadata slab", func(t *testing.T) { typeInfo := testTypeInfo{42} storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} @@ -1836,35 +1836,19 @@ func TestArrayEncodeDecode(t *testing.T) { array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - const arraySize = 20 + const arraySize = 18 values := make([]Value, arraySize) - for i := uint64(0); i < arraySize-1; i++ { + for i := uint64(0); i < arraySize; i++ { v := NewStringValue(strings.Repeat("a", 22)) values[i] = v + err := array.Append(v) require.NoError(t, err) } - typeInfo2 := testTypeInfo{43} - - nestedArray, err := NewArray(storage, address, typeInfo2) - require.NoError(t, err) - - err = nestedArray.Append(Uint64Value(0)) - require.NoError(t, err) - - values[arraySize-1] = nestedArray - - err = array.Append(nestedArray) - require.NoError(t, err) - - require.Equal(t, uint64(arraySize), array.Count()) - require.Equal(t, uint64(1), nestedArray.Count()) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ @@ -1892,8 +1876,8 @@ func TestArrayEncodeDecode(t *testing.T) { 0x00, 0xe4, // child header 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x00, 0x00, 0x00, 0x0b, - 0x01, 0x0e, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, }, // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] @@ -1918,14 +1902,14 @@ func TestArrayEncodeDecode(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, }, - // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... SlabID(...)] + // (data slab) data: [aaaaaaaaaaaaaaaaaaaaaa ... 
aaaaaaaaaaaaaaaaaaaaaa] id3: { // version 0x10, // array data slab flag - 0x40, + 0x00, // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x0b, + 0x99, 0x00, 0x09, // CBOR encoded array elements 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, @@ -1936,27 +1920,6 @@ func TestArrayEncodeDecode(t *testing.T) { 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, - - // (data slab) next: 0, data: [0] - id4: { - // version - 0x10, - // extra data flag - 0x80, - - // extra data - // array of extra data - 0x81, - // type info - 0x18, 0x2b, - - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, }, } @@ -1966,7 +1929,6 @@ func TestArrayEncodeDecode(t *testing.T) { require.Equal(t, expected[id1], m[id1]) require.Equal(t, expected[id2], m[id2]) require.Equal(t, expected[id3], m[id3]) - require.Equal(t, expected[id4], m[id4]) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, m) @@ -1977,1589 +1939,4197 @@ func TestArrayEncodeDecode(t *testing.T) { verifyArray(t, storage2, typeInfo, address, array2, values, false) }) -} - -func TestArrayEncodeDecodeRandomValues(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + // Same type info is reused. 
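+	// The inlined extra data section below encodes typeInfo2 only once (0x81),
+	// and both inlined children reference it by extra data index 0 (0x18, 0x00).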
+ t.Run("root data slab, inlined child array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const opCount = 8192 + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - r := newRand(t) + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) - array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + err = childArray.Append(v) + require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, array, values, false) + err = parentArray.Append(childArray) + require.NoError(t, err) - // Decode data to new storage - storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + values[i] = childArray + } - // Test new array from storage2 - array2, err := NewArrayWithRootID(storage2, array.SlabID()) - require.NoError(t, err) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - verifyArray(t, storage2, typeInfo, address, array2, values, false) -} + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, -func TestEmptyArray(t *testing.T) { + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - t.Parallel() + // inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestBasicStorage(t) + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) - t.Run("get", func(t *testing.T) { - s, err := array.Get(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) - t.Run("set", func(t *testing.T) { - s, err := array.Set(0, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) - t.Run("insert", func(t *testing.T) { - err := array.Insert(1, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var 
userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("remove", func(t *testing.T) { - s, err := array.Remove(0) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var indexOutOfBoundsError *IndexOutOfBoundsError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &indexOutOfBoundsError) - require.ErrorAs(t, userError, &indexOutOfBoundsError) - require.Nil(t, s) - }) + // Different type info are encoded. + t.Run("root data slab, inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - t.Run("iterate", func(t *testing.T) { - i := uint64(0) - err := array.Iterate(func(v Value) (bool, error) { - i++ - return true, nil - }) + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - require.Equal(t, uint64(0), i) - }) - t.Run("count", func(t *testing.T) { - count := array.Count() - require.Equal(t, uint64(0), count) - }) + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - t.Run("type", func(t *testing.T) { - require.True(t, typeInfoComparator(typeInfo, array.Type())) - }) + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - // TestArrayEncodeDecode/empty tests empty array encoding and decoding -} + err = childArray.Append(v) + require.NoError(t, err) -func TestArrayStringElement(t *testing.T) { + err = parentArray.Append(childArray) + require.NoError(t, err) - t.Parallel() + values[i] = childArray + } - t.Run("inline", func(t *testing.T) { + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - const arraySize = 4096 + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, - r := newRand(t) + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - stringSize := int(maxInlineArrayElementSize - 3) + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := randStr(r, stringSize) - values[i] = NewStringValue(s) + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, } - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - typeInfo := testTypeInfo{42} + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) - array, err := NewArray(storage, address, typeInfo) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := 
NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(values[i]) - require.NoError(t, err) - } + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) - verifyArray(t, storage, typeInfo, address, array, values, false) + // Same type info is reused. + t.Run("root data slab, multiple levels of inlined array of same type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - stats, err := GetArrayStats(array) + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - require.Equal(t, uint64(0), stats.StorableSlabCount) - }) - t.Run("external slab", func(t *testing.T) { + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - const arraySize = 4096 + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) - r := newRand(t) + err = gchildArray.Append(v) + require.NoError(t, err) - stringSize := int(maxInlineArrayElementSize + 512) + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := randStr(r, stringSize) - values[i] = NewStringValue(s) + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = parentArray.Append(childArray) + require.NoError(t, err) + + values[i] = childArray } - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - typeInfo := testTypeInfo{42} + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, - for i := uint64(0); i < arraySize; i++ { - err := array.Append(values[i]) - require.NoError(t, err) - } + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - verifyArray(t, storage, typeInfo, address, array, values, false) + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, - stats, err := GetArrayStats(array) + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } + + m, err := storage.Encode() require.NoError(t, err) - require.Equal(t, uint64(arraySize), stats.StorableSlabCount) - }) -} + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) -func TestArrayStoredValue(t *testing.T) { + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) - const arraySize = 4096 + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) + require.NoError(t, err) - typeInfo := testTypeInfo{42} - address 
:= Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + t.Run("root data slab, multiple levels of inlined array of different type", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + typeInfo4 := testTypeInfo{45} + typeInfo5 := testTypeInfo{46} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) + parentArray, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - } - - rootID := array.SlabID() - slabIterator, err := storage.SlabIterator() - require.NoError(t, err) + const arraySize = 2 + values := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + v := Uint64Value(i) - for { - id, slab := slabIterator() + var ti TypeInfo + if i == 0 { + ti = typeInfo2 + } else { + ti = typeInfo4 + } + gchildArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - if id == SlabIDUndefined { - break - } + err = gchildArray.Append(v) + require.NoError(t, err) - value, err := slab.StoredValue(storage) + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo5 + } + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - if id == rootID { + err = childArray.Append(gchildArray) require.NoError(t, err) - array2, ok := value.(*Array) - require.True(t, ok) + err = parentArray.Append(childArray) + require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, array2, values, false) - } else { - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var notValueError *NotValueError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, ¬ValueError) - require.ErrorAs(t, fatalError, ¬ValueError) - require.Nil(t, value) + values[i] = childArray } - } -} - -func TestArrayPopIterate(t *testing.T) { - - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - i := uint64(0) - err = array.PopIterate(func(v Storable) { - i++ - }) - require.NoError(t, err) - require.Equal(t, uint64(0), i) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - verifyEmptyArray(t, storage, typeInfo, address, array) - }) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + // (data slab) data: [[0] [1]] + id1: { + // version + 0x11, + // array data slab flag + 0x80, - t.Run("root-dataslab", func(t *testing.T) { + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - const arraySize = 10 + // inlined extra data + 0x84, + // typeInfo3 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + // typeInfo2 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // typeInfo5 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2e, + // typeInfo4 + 0xd8, 0xf7, + 0x81, + 0x18, 0x2d, - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x02, + // CBOR encoded array elements + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x02, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xfa, 0x83, 0x18, 0x03, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x01, + }, + } - array, err := NewArray(storage, address, typeInfo) + m, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) - } + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) - i := 0 - err = array.PopIterate(func(v Storable) { - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) - i++ - }) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, parentArray.SlabID()) require.NoError(t, err) - require.Equal(t, arraySize, i) - verifyEmptyArray(t, storage, typeInfo, address, array) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) - - const arraySize = 4096 + t.Run("root metadata slab, inlined array of same type", func(t *testing.T) { typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + err := array.Append(v) require.NoError(t, err) + + values = append(values, v) } - i := 0 - err = array.PopIterate(func(v Storable) { - vv, err := v.StoredValue(storage) + for i := 0; i < 2; i++ { + childArray, err := NewArray(storage, address, typeInfo2) require.NoError(t, err) - valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) - i++ - }) - require.NoError(t, err) - require.Equal(t, arraySize, i) - - verifyEmptyArray(t, storage, typeInfo, address, array) - }) -} -func TestArrayFromBatchData(t *testing.T) { + err = childArray.Append(Uint64Value(i)) + require.NoError(t, err) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} + err = array.Append(childArray) + require.NoError(t, err) - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) - require.Equal(t, uint64(0), array.Count()) + values = append(values, childArray) + } - iter, err := array.Iterator() - require.NoError(t, err) + require.Equal(t, uint64(arraySize), array.Count()) - // Create a new array with new storage, new address, and original array's elements. 
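+				// Decoding note (added for readability): each child header below is 14 bytes: slab index (8 bytes) + count (4 bytes) + size (2 bytes); e.g. header 1 decodes to {index:2 count:9 size:0xe4=228}, matching the "headers:" comment above.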
- address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), array.SlabID()) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - verifyEmptyArray(t, storage, typeInfo, address, copied) - }) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ - t.Run("root-dataslab", func(t *testing.T) { + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, - const arraySize = 10 + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - typeInfo := testTypeInfo{42} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v - err := array.Append(v) - require.NoError(t, err) - } + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, - require.Equal(t, uint64(arraySize), array.Count()) + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, + } - iter, err := array.Iterator() + m, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) - // Create a new array with new storage, new address, and original array's elements. 
- address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), array.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) - - const arraySize = 4096 + t.Run("root metadata slab, inlined array of different type", func(t *testing.T) { typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) + array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := Uint64Value(i) - values[i] = v + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-2; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + err := array.Append(v) require.NoError(t, err) + + values = append(values, v) } - require.Equal(t, uint64(arraySize), array.Count()) + for i := 0; i < 2; i++ { + var ti TypeInfo + if i == 0 { + ti = typeInfo3 + } else { + ti = typeInfo2 + } - iter, err := array.Iterator() - require.NoError(t, err) + childArray, err := NewArray(storage, address, ti) + require.NoError(t, err) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - storage := newTestPersistentStorage(t) - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + err = childArray.Append(Uint64Value(i)) + require.NoError(t, err) - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + err = array.Append(childArray) + require.NoError(t, err) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + values = append(values, childArray) + } - t.Run("rebalance two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + require.Equal(t, uint64(arraySize), array.Count()) - typeInfo := testTypeInfo{42} + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ - var values []Value - var v Value + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:268 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, - v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) - values = append(values, v) + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, - err = array.Insert(0, v) - require.NoError(t, err) + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) 
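+				// Decoding note: same 14-byte child header layout as in the test above; header 2 below encodes size 0x010c = 268 bytes, matching the "headers:" comment.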
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0c, + }, - for i := 0; i < 35; i++ { - v = Uint64Value(i) - values = append(values, v) + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, - err = array.Append(v) - require.NoError(t, err) + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[0] [1]] + id3: { + // version + 0x11, + // array data slab flag + 0x00, + // inlined extra data + 0x82, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, + 0xd8, 0xfa, 0x83, 0x18, 0x01, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x1, + }, } - require.Equal(t, uint64(36), array.Count()) - - iter, err := array.Iterator() + m, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) + verifyArray(t, storage2, typeInfo, address, array2, values, false) }) - t.Run("merge two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) - + t.Run("has pointers", func(t *testing.T) { typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) + array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - var values []Value - var v Value - for i := 0; i < 35; i++ { - v = Uint64Value(i) - values = append(values, v) - err = array.Append(v) + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) require.NoError(t, err) - } + 
+ values = append(values, v) + } + + childArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = childArray.Append(v) + require.NoError(t, err) + } + + err = array.Append(childArray) + require.NoError(t, err) + + values = append(values, childArray) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(5), childArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x0e, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
SlabID(...)] + id3: { + // version (no next slab ID, no inlined slabs) + 0x10, + // array data slab flag + 0x40, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // (data slab) next: 0, data: [bbbbbbbbbbbbbbbbbbbbbb ...] 
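+			// Note: id4 below is the child array's own root data slab. The child is not inlinable here, so the parent's last element (in id3 above) stores it as a SlabIDStorable pointer (tag 0xd8, 0xff) to this slab.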
+ id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) + + t.Run("has pointers in inlined slab", func(t *testing.T) { + typeInfo := testTypeInfo{42} + typeInfo2 := testTypeInfo{43} + typeInfo3 := testTypeInfo{44} + storage := newTestBasicStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + const arraySize = 20 + values := make([]Value, 0, arraySize) + for i := uint64(0); i < arraySize-1; i++ { + v := NewStringValue(strings.Repeat("a", 22)) + + err := array.Append(v) + require.NoError(t, err) + + values = append(values, v) + } + + childArray, err := NewArray(storage, address, typeInfo3) + require.NoError(t, err) + + gchildArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + + err = gchildArray.Append(v) + require.NoError(t, err) + } + + err = childArray.Append(gchildArray) + require.NoError(t, err) + + err = array.Append(childArray) + require.NoError(t, err) + + values = append(values, childArray) + + require.Equal(t, uint64(arraySize), array.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(5), gchildArray.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 5}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:287 count:11} ] + id1: { + // version + 0x10, + // flag + 0x81, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2a, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + // child header count + 0x00, 0x02, + // child header 1 (slab index, count, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x09, + 0x00, 
0xe4, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x0b, + 0x01, 0x1f, + }, + + // (data slab) next: 3, data: [aaaaaaaaaaaaaaaaaaaaaa ... aaaaaaaaaaaaaaaaaaaaaa] + id2: { + // version + 0x12, + // array data slab flag + 0x00, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x09, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + }, + + // (data slab) next: 0, data: [aaaaaaaaaaaaaaaaaaaaaa ... 
[SlabID(...)]] + id3: { + // version (no next slab ID, has inlined slabs) + 0x11, + // array data slab flag (has pointer) + 0x40, + + // inlined array of extra data + 0x81, + // type info + 0xd8, 0xf7, + 0x81, + 0x18, 0x2c, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x0b, + // CBOR encoded array elements + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + }, + + // (data slab) data: [bbbbbbbbbbbbbbbbbbbbbb ...] 
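+			// Note: id4 below holds the grandchild array. The child array is inlined in id3 above, but the grandchild is too large to inline as well, so the inlined child's single element is a SlabID pointer (tag 0xd8, 0xff) to this slab.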
+ id4: { + // version + 0x10, + // extra data flag + 0x80, + + // extra data + // array of extra data + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + m, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(m)) + require.Equal(t, expected[id1], m[id1]) + require.Equal(t, expected[id2], m[id2]) + require.Equal(t, expected[id3], m[id3]) + require.Equal(t, expected[id4], m[id4]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, m) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + verifyArray(t, storage2, typeInfo, address, array2, values, false) + }) +} + +func TestArrayEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + const opCount = 8192 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, values := testArrayAppendSetInsertRemoveRandomValues(t, r, storage, typeInfo, address, opCount) + + verifyArray(t, storage, typeInfo, address, array, values, false) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Test new array from storage2 + array2, err := NewArrayWithRootID(storage2, array.SlabID()) + require.NoError(t, err) + + verifyArray(t, storage2, typeInfo, address, array2, values, false) +} + +func TestEmptyArray(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestBasicStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := array.Get(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("set", func(t *testing.T) { + s, err := array.Set(0, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("insert", func(t *testing.T) { + err := array.Insert(1, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, 
&indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + }) + + t.Run("remove", func(t *testing.T) { + s, err := array.Remove(0) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var indexOutOfBoundsError *IndexOutOfBoundsError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &indexOutOfBoundsError) + require.ErrorAs(t, userError, &indexOutOfBoundsError) + require.Nil(t, s) + }) + + t.Run("iterate", func(t *testing.T) { + i := uint64(0) + err := array.Iterate(func(v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + }) + + t.Run("count", func(t *testing.T) { + count := array.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, array.Type())) + }) + + // TestArrayEncodeDecode/empty tests empty array encoding and decoding +} + +func TestArrayStringElement(t *testing.T) { + + t.Parallel() + + t.Run("inline", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize - 3) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(0), stats.StorableSlabCount) + }) + + t.Run("external slab", func(t *testing.T) { + + const arraySize = 4096 + + r := newRand(t) + + stringSize := int(maxInlineArrayElementSize + 512) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := randStr(r, stringSize) + values[i] = NewStringValue(s) + } + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + typeInfo := testTypeInfo{42} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(values[i]) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, array, values, false) + + stats, err := GetArrayStats(array) + require.NoError(t, err) + require.Equal(t, uint64(arraySize), stats.StorableSlabCount) + }) +} + +func TestArrayStoredValue(t *testing.T) { + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + rootID := array.SlabID() + + slabIterator, err := storage.SlabIterator() + require.NoError(t, err) + + for { + id, slab := slabIterator() + + if id == SlabIDUndefined { + break + } + + value, err := slab.StoredValue(storage) + + if id == rootID { + require.NoError(t, err) + + array2, ok := value.(*Array) + require.True(t, ok) + + verifyArray(t, storage, typeInfo, address, array2, values, false) + } else { + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError 
*FatalError + var notValueError *NotValueError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, ¬ValueError) + require.ErrorAs(t, fatalError, ¬ValueError) + require.Nil(t, value) + } + } +} + +func TestArrayPopIterate(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + i := uint64(0) + err = array.PopIterate(func(v Storable) { + i++ + }) + require.NoError(t, err) + require.Equal(t, uint64(0), i) + + verifyEmptyArray(t, storage, typeInfo, address, array) + }) + + t.Run("root-dataslab", func(t *testing.T) { + + const arraySize = 10 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + i := 0 + err = array.PopIterate(func(v Storable) { + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) + i++ + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + verifyEmptyArray(t, storage, typeInfo, address, array) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + i := 0 + err = array.PopIterate(func(v Storable) { + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, values[arraySize-i-1], vv) + i++ + }) + require.NoError(t, err) + require.Equal(t, arraySize, i) + + verifyEmptyArray(t, storage, typeInfo, address, array) + }) +} + +func TestArrayFromBatchData(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + require.Equal(t, uint64(0), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + // Create a new array with new storage, new address, and original array's elements. 
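+		// NewArrayFromBatchData builds a fresh array from the iterator's values, so the copy gets its own root slab ID under the new address (asserted below).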
+ address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + verifyEmptyArray(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + + const arraySize = 10 + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + // Create a new array with new storage, new address, and original array's elements. + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), array.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := Uint64Value(i) + values[i] = v + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + storage := newTestPersistentStorage(t) + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) + values = append(values, v) + + err = array.Insert(0, v) + require.NoError(t, err) + + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(36), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + + array, err := 
NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + for i := 0; i < 35; i++ { + v = Uint64Value(i) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + } v = NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize-2))) values = append(values, nil) copy(values[25+1:], values[25:]) values[25] = v - err = array.Insert(25, v) + err = array.Insert(25, v) + require.NoError(t, err) + + require.Equal(t, uint64(36), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const arraySize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := randomValue(r, int(maxInlineArrayElementSize)) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + require.Equal(t, uint64(arraySize), array.Count()) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) + + t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. 
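+		// Each element is close to maxInlineArrayElementSize, so a single data slab holding all three would likely exceed maxThreshold; the batch copy below exercises that boundary.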
+ + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + array, err := NewArray( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + typeInfo) + require.NoError(t, err) + + var values []Value + var v Value + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) + values = append(values, v) + err = array.Append(v) + require.NoError(t, err) + + iter, err := array.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + copied, err := NewArrayFromBatchData( + storage, + address, + array.Type(), + func() (Value, error) { + return iter.Next() + }) + + require.NoError(t, err) + require.NotEqual(t, array.SlabID(), copied.SlabID()) + + verifyArray(t, storage, typeInfo, address, copied, values, false) + }) +} + +func TestArrayNestedStorables(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + + const arraySize = 1024 * 4 + + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + for i := uint64(0); i < arraySize; i++ { + s := strings.Repeat("a", int(i)) + v := SomeValue{Value: NewStringValue(s)} + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + verifyArray(t, storage, typeInfo, address, array, values, true) +} + +func TestArrayMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + var values []Value + for i := 0; i < 2; i++ { + // String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead. + v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3))) + values = append(values, v) + + err = array.Append(v) + require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + // Size of root data slab with two elements of max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab), and minus 1 byte + // (for rounding when computing max inline array element size). 
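+	// Worked example (assuming the package default threshold of 1024 bytes and a 16-byte slab ID): expected root slab size = 1024 - 16 - 1 = 1007 bytes.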
+ require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size)) + + verifyArray(t, storage, typeInfo, address, array, values, false) +} + +func TestArrayString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5]` + require.Equal(t, want, array.String()) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]` + require.Equal(t, want, array.String()) + }) +} + +func TestArraySlabDump(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const arraySize = 6 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]", + } + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const arraySize = 120 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + err := array.Append(Uint64Value(i)) + require.NoError(t, err) + } + + want := []string{ + "level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]", + "level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]", + "level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("overflow", func(t *testing.T) { + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 
8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize)))) + require.NoError(t, err) + + want := []string{ + "level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + + dumps, err := DumpArraySlabs(array) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} + +func errorCategorizationCount(err error) int { + var fatalError *FatalError + var userError *UserError + var externalError *ExternalError + + count := 0 + if errors.As(err, &fatalError) { + count++ + } + if errors.As(err, &userError) { + count++ + } + if errors.As(err, &externalError) { + count++ + } + return count +} + +func TestArrayLoadedValueIterator(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, nil) + }) + + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + expectedValues := values[i+1:] + verifyArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root 
data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + expectedValues := values[:i] + verifyArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element in the middle + unloadValueIndex := 1 + + v := values[unloadValueIndex] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 3 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+arraySize, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + i := 0 + err := array.IterateLoadedValues(func(v Value) (bool, error) { + // At this point, the iterator has returned the first element (v). + + // Remove all other nested composite elements (except the first element) from storage. + for _, value := range values[1:] { + nestedArray, ok := value.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + } + + require.Equal(t, 0, i) + valueEqual(t, typeInfoComparator, values[0], v) + i++ + return true, nil + }) + + require.NoError(t, err) + require.Equal(t, 1, i) // Only first element is iterated because other elements are removed during iteration.
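+ // (IterateLoadedValues only visits elements whose slabs are still loaded
+ // in storage, so the elements unloaded above are skipped rather than
+ // reported as errors.)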
+ }) + + t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 3 + + // Create an array with nested composite value at specified index + for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { + storage := newTestPersistentStorage(t) + + array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + + // parent array: 1 root data slab + // nested composite element: 1 root data slab + require.Equal(t, 2, len(storage.deltas)) + require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element + v := values[nestedCompositeIndex].(*Array) + + err := storage.Remove(v.SlabID()) + require.NoError(t, err) + + copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + require.Equal(t, 3, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + expectedValues := values[i+1:] + verifyArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i] + + nestedArray, ok := v.(*Array) + 
require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + expectedValues := values[:i] + verifyArrayLoadedElements(t, array, expectedValues) + } + }) + + t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 20 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+arraySize, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite element in the middle + for _, index := range []int{4, 14} { + + v := values[index] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with simple and composite values, unload composite element", func(t *testing.T) { + const arraySize = 20 + + // Create an array with composite value at specified index. + for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { + storage := newTestPersistentStorage(t) + + array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + + // parent array: 1 root metadata slab, 2 data slabs + // nested composite value element: 1 root data slab for each + require.Equal(t, 3+1, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + // Unload composite value + v := values[nestedCompositeIndex].(*Array) + + err := storage.Remove(v.SlabID()) + require.NoError(t, err) + + copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload data slabs from front to back + for i := 0; i < len(metaDataSlab.childrenHeaders); i++ { + + childHeader := metaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // 
Unload data slabs from back to front + for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + + childHeader := metaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[:len(values)-int(childHeader.count)] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 30 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + + verifyArrayLoadedElements(t, array, values) + + metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + require.True(t, len(metaDataSlab.childrenHeaders) > 2) + + index := 1 + childHeader := metaDataSlab.childrenHeaders[index] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:]) + values = values[:array.Count()-uint64(childHeader.count)] + + verifyArrayLoadedElements(t, array, values) + }) + + t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 250 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs + require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload non-root metadata slabs from front to back + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const arraySize = 250 + array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs + require.Equal(t, 3, getArrayMetaDataSlabCount(storage)) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + // Unload non-root metadata slabs from back to front + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + + childHeader := rootMetaDataSlab.childrenHeaders[i] + + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) + + values = values[childHeader.count:] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + verifyArrayLoadedElements(t, array, values) + + r := 
newRand(t) + + // Unload random composite element + for len(values) > 0 { + + i := r.Intn(len(values)) + + v := values[i] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] + + verifyArrayLoadedElements(t, array, values) + } + }) + + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + verifyArrayLoadedElements(t, array, values) + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + type slabInfo struct { + id SlabID + startIndex int + count int + } + + count := 0 + var dataSlabInfos []*slabInfo + for _, mheader := range rootMetaDataSlab.childrenHeaders { + nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) + require.True(t, ok) + + for _, h := range nonrootMetaDataSlab.childrenHeaders { + dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)} + dataSlabInfos = append(dataSlabInfos, dataSlabInfo) + count += int(h.count) + } + } + + r := newRand(t) + + // Unload random data slab. + for len(dataSlabInfos) > 0 { + indexToUnload := r.Intn(len(dataSlabInfos)) + + slabInfoToUnload := dataSlabInfos[indexToUnload] + + // Update startIndex for all data slabs after indexToUnload. + for i := indexToUnload + 1; i < len(dataSlabInfos); i++ { + dataSlabInfos[i].startIndex -= slabInfoToUnload.count + } + + // Remove slabInfo to be unloaded from dataSlabInfos. 
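+ // For example (hypothetical numbers): if the unloaded slab covered
+ // values[100:150], every later slab's startIndex shifts down by 50 so it
+ // still points at the right offset in the trimmed values slice.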
+ copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:]) + dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + + err := storage.Remove(slabInfoToUnload.id) + require.NoError(t, err) + + copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) + values = values[:len(values)-slabInfoToUnload.count] + + verifyArrayLoadedElements(t, array, values) + } + + require.Equal(t, 0, len(values)) + }) + + t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + + storage := newTestPersistentStorage(t) + + const arraySize = 500 + array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + + // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+arraySize) + require.True(t, getArrayMetaDataSlabCount(storage) > 1) + + verifyArrayLoadedElements(t, array, values) + + type slabInfo struct { + id SlabID + startIndex int + count int + children []*slabInfo + } + + rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) + require.True(t, ok) + + var dataSlabCount, metadataSlabCount int + nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) + for i, mheader := range rootMetaDataSlab.childrenHeaders { + + nonrootMetadataSlabInfo := &slabInfo{ + id: mheader.slabID, + startIndex: metadataSlabCount, + count: int(mheader.count), + } + metadataSlabCount += int(mheader.count) + + nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) + require.True(t, ok) + + children := make([]*slabInfo, len(nonrootMetadataSlab.childrenHeaders)) + for i, h := range nonrootMetadataSlab.childrenHeaders { + children[i] = &slabInfo{ + id: h.slabID, + startIndex: dataSlabCount, + count: int(h.count), + } + dataSlabCount += int(h.count) + } + + nonrootMetadataSlabInfo.children = children + nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo + } + + r := newRand(t) + + const ( + metadataSlabType int = iota + dataSlabType + maxSlabType + ) + + for len(nonrootMetadataSlabInfos) > 0 { + + var slabInfoToBeRemoved *slabInfo + var isLastSlab bool + + // Unload random metadata or data slab. + switch r.Intn(maxSlabType) { + + case metadataSlabType: + // Unload metadata slab at random index. + metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + + isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1 + + slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex] + + count := slabInfoToBeRemoved.count + + // Update startIndex for subsequent metadata and data slabs. + for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { + nonrootMetadataSlabInfos[i].startIndex -= count + + for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { + nonrootMetadataSlabInfos[i].children[j].startIndex -= count + } + } + + copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) + nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + + case dataSlabType: + // Unload data slab at random index.
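+ // A data slab is chosen in two steps below: first a random non-root
+ // metadata slab, then a random data slab among that slab's children.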
+ metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + + metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex] + + dataSlabIndex := r.Intn(len(metaSlabInfo.children)) + + slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex] + + isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) && + (dataSlabIndex == len(metaSlabInfo.children)-1) + + count := slabInfoToBeRemoved.count + + // Update startIndex for subsequent data slabs. + for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ { + metaSlabInfo.children[i].startIndex -= count + } + + copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:]) + metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1] + + metaSlabInfo.count -= count + + // Update startIndex for all subsequent metadata slabs. + for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { + nonrootMetadataSlabInfos[i].startIndex -= count + + for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { + nonrootMetadataSlabInfos[i].children[j].startIndex -= count + } + } + + if len(metaSlabInfo.children) == 0 { + copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) + nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + } + } + + err := storage.Remove(slabInfoToBeRemoved.id) + require.NoError(t, err) + + if isLastSlab { + values = values[:slabInfoToBeRemoved.startIndex] + } else { + copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) + values = values[:len(values)-slabInfoToBeRemoved.count] + } + + verifyArrayLoadedElements(t, array, values) + } + + require.Equal(t, 0, len(values)) + }) +} + +func createArrayWithSimpleValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { + + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + r := rune('a') + for i := 0; i < arraySize; i++ { + values[i] = NewStringValue(strings.Repeat(string(r), 20)) + + err := array.Append(values[i]) require.NoError(t, err) + } - require.Equal(t, uint64(36), array.Count()) + return array, values +} + +func createArrayWithCompositeValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, +) (*Array, []Value) { + + // Create parent array + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create nested array + nested, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + for j := 0; j < 50; j++ { + err = nested.Append(Uint64Value(j)) + require.NoError(t, err) + } + + expectedValues[i] = nested + + // Append nested array to parent + err = array.Append(nested) + require.NoError(t, err) + } + + return array, expectedValues +} + +func createArrayWithSimpleAndCompositeValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + arraySize int, + compositeValueIndex int, +) (*Array, []Value) { + require.True(t, compositeValueIndex < arraySize) + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]Value, arraySize) + r := 'a' + for i := 0; i < arraySize; i++ { + + if compositeValueIndex == i { + // Create nested array with 50 elements + a, err := NewArray(storage, 
address, typeInfo) + require.NoError(t, err) + + for j := 0; j < 50; j++ { + err = a.Append(Uint64Value(j)) + require.NoError(t, err) + } + + values[i] = a + } else { + values[i] = NewStringValue(strings.Repeat(string(r), 20)) + r++ + } + + err = array.Append(values[i]) + require.NoError(t, err) + } + + return array, values +} + +func verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { + i := 0 + err := array.IterateLoadedValues(func(v Value) (bool, error) { + require.True(t, i < len(expectedValues)) + valueEqual(t, typeInfoComparator, expectedValues[i], v) + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} + +func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*ArrayMetaDataSlab); ok { + counter++ + } + } + return counter +} + +func TestArrayID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + sid := array.SlabID() + id := array.ValueID() + + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} + +func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { + const ( + arraySize = 3 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + values := make([]*testMutableValue, arraySize) + for i := uint64(0); i < arraySize; i++ { + v := newTestMutableValue(initialStorableSize) + values[i] = v + + err := array.Append(v) + require.NoError(t, err) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) + + for i := uint64(0); i < arraySize; i++ { + mv := values[i] + mv.updateStorableSize(mutatedStorableSize) + + existingStorable, err := array.Set(i, mv) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + require.True(t, array.root.IsData()) + + expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize + require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) + + err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) +} + +func TestChildArrayInlinabilityInParentArray(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) - iter, err := array.Iterator() - require.NoError(t, err) + t.Run("parent is root data slab, with one child array", func(t *testing.T) { + const arraySize = 1 + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + // Create an array with empty child array as element. 
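+ // createArrayWithEmptyChildArray is a helper defined in another hunk of
+ // this file; it presumably appends arraySize empty child arrays to a new
+ // parent array, so every child starts small enough to be inlined.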
+ parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - t.Run("random", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + // Test parent slab size with 1 empty inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - const arraySize = 4096 + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - r := newRand(t) + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - typeInfo := testTypeInfo{42} + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := randomValue(r, int(maxInlineArrayElementSize)) - values[i] = v + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - err := array.Append(v) + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. + for i := 0; i < 10; i++ { + err = childArray.Append(v) require.NoError(t, err) - } + require.Equal(t, uint64(i+1), childArray.Count()) - require.Equal(t, uint64(arraySize), array.Count()) + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - iter, err := array.Iterator() - require.NoError(t, err) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - storage := newTestPersistentStorage(t) + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + // Add one more element to child array which triggers the inlined child array slab to become a standalone slab + err = childArray.Append(v) require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) + require.False(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child array is no longer inlined. - t.Run("data slab too large", func(t *testing.T) { - // Slab size must not exceed maxThreshold. 
- // We cannot make this problem happen after Atree Issue #193 - // was fixed by PR #194 & PR #197. This test is to catch regressions. + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - SetThreshold(256) - defer SetThreshold(1024) + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - r := newRand(t) + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - typeInfo := testTypeInfo{42} - array, err := NewArray( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - typeInfo) - require.NoError(t, err) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - var values []Value - var v Value + // Remove elements from child array which triggers the standalone array slab to become an inlined slab again. + for childArray.Count() > 0 { + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - v = NewStringValue(randStr(r, int(maxInlineArrayElementSize-2))) - values = append(values, v) - err = array.Append(v) - require.NoError(t, err) + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - iter, err := array.Iterator() - require.NoError(t, err) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + require.Equal(t, uint64(0), childArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) + + t.Run("parent is root data slab, with two child arrays", func(t *testing.T) { + const arraySize = 2 + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewArrayFromBatchData( - storage, - address, - array.Type(), - func() (Value, error) { - return iter.Next() - }) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - require.NoError(t, err) - require.NotEqual(t, array.SlabID(), copied.SlabID()) + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - verifyArray(t, storage, typeInfo, address, copied, values, false) - }) -} + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
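+ // getStoredDeltas is assumed to report len(storage.deltas); with all
+ // child arrays inlined, the parent's root data slab is the only stored slab.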
-func TestArrayNestedStorables(t *testing.T) { + // Test parent slab size with 2 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - t.Parallel() + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - typeInfo := testTypeInfo{42} + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) - const arraySize = 1024 * 4 + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - values := make([]Value, arraySize) - for i := uint64(0); i < arraySize; i++ { - s := strings.Repeat("a", int(i)) - v := SomeValue{Value: NewStringValue(s)} - values[i] = v + children[i].array = childArray + children[i].valueID = valueID + } - err := array.Append(v) - require.NoError(t, err) - } + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - verifyArray(t, storage, typeInfo, address, array, values, true) -} + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. + for i := 0; i < 10; i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID -func TestArrayMaxInlineElement(t *testing.T) { - t.Parallel() + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) - r := newRand(t) + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Test parent slab size + expectedParentSize += vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - var values []Value - for i := 0; i < 2; i++ { - // String length is MaxInlineArrayElementSize - 3 to account for string encoding overhead. 
- v := NewStringValue(randStr(r, int(maxInlineArrayElementSize-3))) - values = append(values, v) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } - err = array.Append(v) - require.NoError(t, err) - } + expectedStoredDeltas := 1 - require.True(t, array.root.IsData()) + // Add one more element to child array which triggers the inlined child array slab to become a standalone slab + for _, child := range children { + childArray := child.array + childValueID := child.valueID - // Size of root data slab with two elements of max inlined size is target slab size minus - // slab id size (next slab id is omitted in root slab), and minus 1 byte - // (for rounding when computing max inline array element size). - require.Equal(t, targetThreshold-slabIDSize-1, uint64(array.root.Header().size)) + err := childArray.Append(v) + require.NoError(t, err) + require.False(t, childArray.Inlined()) - verifyArray(t, storage, typeInfo, address, array, values, false) -} + expectedStoredDeltas++ + require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) // There are more stored slabs because child array is no longer inlined. -func TestArrayString(t *testing.T) { + expectedSlabID := valueIDToSlabID(childValueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - SetThreshold(256) - defer SetThreshold(1024) + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - t.Run("small", func(t *testing.T) { - const arraySize = 6 + //expectedParentSize := arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + expectedParentSize -= inlinedArrayDataSlabPrefixSize + uint32(childArray.Count()-1)*vSize + expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Remove one element from child array which triggers the standalone array slab to become an inlined slab again. 
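+ // Removing a single element drops each child's encoded size back under
+ // the max inline size limit, so the child is re-inlined and its
+ // standalone slab is deleted from storage.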
+ for _, child := range children { + childArray := child.array + childValueID := child.valueID - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) + existingStorable, err := childArray.Remove(0) require.NoError(t, err) - } + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - want := `[0 1 2 3 4 5]` - require.Equal(t, want, array.String()) - }) + require.True(t, childArray.Inlined()) - t.Run("large", func(t *testing.T) { - const arraySize = 120 + expectedStoredDeltas-- + require.Equal(t, expectedStoredDeltas, getStoredDeltas(storage)) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) + expectedParentSize -= SlabIDStorable{}.ByteSize() + expectedParentSize += expectedInlinedSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - want := `[0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]` - require.Equal(t, want, array.String()) - }) -} + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID -func TestArraySlabDump(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - t.Run("small", func(t *testing.T) { - const arraySize = 6 + require.True(t, childArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedParentSize -= vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) - require.NoError(t, err) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } } - want := []string{ - "level 1, ArrayDataSlab id:0x102030405060708.1 size:23 count:6 elements: [0 1 2 3 4 5]", + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) } - dumps, err := DumpArraySlabs(array) - require.NoError(t, err) - 
require.Equal(t, want, dumps) + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - t.Run("large", func(t *testing.T) { - const arraySize = 120 + t.Run("parent is root metadata slab, with four child arrays", func(t *testing.T) { + const arraySize = 4 typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Create an array with empty child array as element. + parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - for i := uint64(0); i < arraySize; i++ { - err := array.Append(Uint64Value(i)) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. + + // Test parent slab size with 4 empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + children[i].array = childArray + children[i].valueID = valueID } - want := []string{ - "level 1, ArrayMetaDataSlab id:0x102030405060708.1 size:40 count:120 children: [{id:0x102030405060708.2 size:213 count:54} {id:0x102030405060708.3 size:285 count:66}]", - "level 2, ArrayDataSlab id:0x102030405060708.2 size:213 count:54 elements: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53]", - "level 2, ArrayDataSlab id:0x102030405060708.3 size:285 count:66 elements: [54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119]", + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size + + // Appending 10 elements to child array so that inlined child array reaches max inlined size as array element. 
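+ // Each append grows an inlined child by vSize bytes, so after 10 appends
+ // each child sits at the largest size that still fits in the parent; the
+ // 11th append below pushes it over the limit.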
+ for i := 0; i < 10; i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID + + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), childArray.Count()) + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } } - dumps, err := DumpArraySlabs(array) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) - t.Run("overflow", func(t *testing.T) { + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + for _, child := range children { + childArray := child.array + childValueID := child.valueID - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + err := childArray.Append(v) + require.NoError(t, err) + require.False(t, childArray.Inlined()) - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + expectedSlabID := valueIDToSlabID(childValueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - err = array.Append(NewStringValue(strings.Repeat("a", int(maxInlineArrayElementSize)))) - require.NoError(t, err) + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - want := []string{ - "level 1, ArrayDataSlab id:0x102030405060708.1 size:24 count:1 elements: [SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", - "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + // Parent array has one data slab and all child arrays are not inlined. + require.Equal(t, 1+arraySize, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) + + // Remove one element from child array which triggers standalone array slab becomes inlined slab again. 
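+ // One removal per child is enough to re-inline it: the child's encoded
+ // size falls back under the max inline size limit, so its standalone slab
+ // is deleted and the child is embedded in the parent again.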
+ for _, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - dumps, err := DumpArraySlabs(array) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) -} + // Parent array has 1 meta data slab and 2 data slabs. + // All child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) + + // Remove remaining elements from inlined child array + childArrayCount := children[0].array.Count() + for i := 0; i < int(childArrayCount); i++ { + for _, child := range children { + childArray := child.array + childValueID := child.valueID + + existingStorable, err := childArray.Remove(0) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) + + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, childValueID, childArray.ValueID()) // value ID is unchanged + + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } -func errorCategorizationCount(err error) int { - var fatalError *FatalError - var userError *UserError - var externalError *ExternalError + // Parent array has 1 data slab. + // All child arrays are inlined. + require.Equal(t, 1, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) - count := 0 - if errors.As(err, &fatalError) { - count++ - } - if errors.As(err, &userError) { - count++ - } - if errors.As(err, &externalError) { - count++ - } - return count + for _, child := range children { + require.Equal(t, uint64(0), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) + }) } -func TestArrayLoadedValueIterator(t *testing.T) { +func TestNestedThreeLevelChildArrayInlinabilityInParentArray(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers child array slab to become standalone slab", func(t *testing.T) { + const arraySize = 1 - t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Create an array with empty child array as element, which has empty child array. 
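+ // createArrayWithEmpty2LevelChildArray is assumed to be a helper defined
+ // in another hunk; it builds parent -> child -> grand child arrays with
+ // both inner arrays empty, so both start out inlined.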
+ parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) - // parent array: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - verifyArrayLoadedElements(t, array, nil) - }) + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - t.Run("root data slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - const arraySize = 3 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - // parent array: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - verifyArrayLoadedElements(t, array, values) - }) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - t.Run("root data slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Get inlined grand child array + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) - verifyArrayLoadedElements(t, array, values) - }) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - t.Run("root data slab with composite values, unload composite element from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. 
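+ // Only 8 appends (vs 10 for a directly inlined child) fit here: the grand
+ // child is nested one level deeper, so the child's own inlined prefix also
+ // counts against the parent's max inline size limit.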
+ for i := 0; i < 8; i++ { + err = gchildArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - verifyArrayLoadedElements(t, array, values) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - }) + // Add one more element to grand child array which triggers the inlined child array slab (NOT the grand child array slab) to become a standalone slab + err = gchildArray.Append(v) + require.NoError(t, err) - t.Run("root data slab with composite values, unload composite element from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.True(t, gchildArray.Inlined()) + require.False(t, childArray.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because child array is no longer inlined. - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - verifyArrayLoadedElements(t, array, values) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + + // Remove elements from grand child array which triggers the standalone child array slab to become an inlined slab again. + for gchildArray.Count() > 0 { + existingStorable, err := gchildArray.Remove(0) require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged + + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + + require.Equal(t, uint64(0), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - t.Run("root data slab with composite values, unload composite element in the middle", func(t *testing.T) { + t.Run("parent is root data slab, one child array, one grand child array, changes to grand child array triggers grand child array slab to become standalone slab", func(t *testing.T) { + const arraySize = 1 + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Create an array with empty child array as element, which has empty child array. 
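+ // This variant grows the grand child with a larger element so that the
+ // grand child itself, not the child, is the slab that exceeds the max
+ // inline size and becomes standalone.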
+ parentArray, expectedValues := createArrayWithEmpty2LevelChildArray(t, storage, address, typeInfo, arraySize) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - verifyArrayLoadedElements(t, array, values) + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - // Unload composite element in the middle - unloadValueIndex := 1 + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - v := values[unloadValueIndex] + // Get inlined child array + e, err := parentArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - nestedArray, ok := v.(*Array) + childArray, ok := e.(*Array) require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - err := storage.Remove(nestedArray.SlabID()) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + // Get inlined grand child array + e, err = childArray.Get(0) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) - verifyArrayLoadedElements(t, array, values) - }) + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) - t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { - storage := newTestPersistentStorage(t) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - const arraySize = 3 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Appending 8 elements to grand child array so that inlined grand child array reaches max inlined size as array element. + for i := 0; i < 8; i++ { + err = gchildArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+1), gchildArray.Count()) + require.Equal(t, uint64(1), childArray.Count()) - // parent array: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+arraySize, len(storage.deltas)) - require.Equal(t, 0, getArrayMetaDataSlabCount(storage)) + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyArrayLoadedElements(t, array, values) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { - // At this point, iterator returned first element (v). 
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
-		// Remove all other nested composite elements (except first element) from storage.
-		for _, value := range values[1:] {
-			nestedArray, ok := value.(*Array)
-			require.True(t, ok)
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-			err := storage.Remove(nestedArray.SlabID())
-			require.NoError(t, err)
-		}
+			// Test inlined child slab size
+			expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-			require.Equal(t, 0, i)
-			valueEqual(t, typeInfoComparator, values[0], v)
-			i++
-			return true, nil
-		})
+			// Test parent slab size
+			expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+			require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+			verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
+		// Add one more element to grand child array, which triggers the inlined grand child array slab (NOT the child array slab) to become a standalone slab
+		largeValue := NewStringValue(strings.Repeat("b", 20))
+		largeValueSize := largeValue.ByteSize()
+		err = gchildArray.Append(largeValue)
		require.NoError(t, err)
-		require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration.
-	})
-	t.Run("root data slab with simple and composite values, unload composite element", func(t *testing.T) {
-		const arraySize = 3
+		require.False(t, gchildArray.Inlined())
+		require.True(t, childArray.Inlined())
+		require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because grand child array is no longer inlined.
-		// Create an array with nested composite value at specified index
-		for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ {
-			storage := newTestPersistentStorage(t)
+		require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
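+		// Note on the SlabID/ValueID split exercised above: a child keeps one
+		// ValueID for its entire lifetime, while a SlabID exists only while the
+		// child is stored as a standalone slab; inlining it again resets the
+		// SlabID to SlabIDUndefined without touching the ValueID.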
+		require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-			array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex)
+		expectedSlabID := valueIDToSlabID(gValueID)
+		require.Equal(t, expectedSlabID, gchildArray.SlabID()) // Slab ID is the same bytewise as value ID.
+		require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
-			// parent array: 1 root data slab
-			// nested composite element: 1 root data slab
-			require.Equal(t, 2, len(storage.deltas))
-			require.Equal(t, 0, getArrayMetaDataSlabCount(storage))
+		// Test standalone grand child slab size
+		expectedStandaloneGrandChildSize := arrayRootDataSlabPrefixSize + uint32(gchildArray.Count()-1)*vSize + largeValueSize
+		require.Equal(t, expectedStandaloneGrandChildSize, gchildArray.root.ByteSize())
-			verifyArrayLoadedElements(t, array, values)
+		// Test inlined child slab size (the child now holds a SlabIDStorable reference to the standalone grand child)
+		expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + SlabIDStorable(expectedSlabID).ByteSize()
+		require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
-			// Unload composite element
-			v := values[nestedCompositeIndex].(*Array)
+		expectedParentSize = arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+		require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
-			err := storage.Remove(v.SlabID())
+		verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+
+		// Remove elements from grand child array, which triggers the standalone grand child array slab to become an inlined slab again.
+		for gchildArray.Count() > 0 {
+			_, err := gchildArray.Remove(gchildArray.Count() - 1)
			require.NoError(t, err)
-			copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
-			values = values[:len(values)-1]
+			require.True(t, gchildArray.Inlined())
+			require.True(t, childArray.Inlined())
+			require.Equal(t, 1, getStoredDeltas(storage))
-			verifyArrayLoadedElements(t, array, values)
+			require.Equal(t, SlabIDUndefined, childArray.SlabID())
+			require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged
+
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID())
+			require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged
+
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
+
+			// Test inlined child slab size
+			expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
+
+			// Test parent slab size
+			expectedParentSize := arrayRootDataSlabPrefixSize + expectedInlinedChildSize
+			require.Equal(t, expectedParentSize, parentArray.root.ByteSize())
+
+			verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
		}
+
+		require.Equal(t, uint64(0), gchildArray.Count())
+		require.Equal(t, uint64(1), childArray.Count())
+		require.Equal(t, uint64(arraySize), parentArray.Count())
	})
-	t.Run("root metadata slab with simple values", func(t *testing.T) {
+	t.Run("parent is root data slab, two child arrays, one grand child array each, changes to child array triggers child array slab to become standalone slab", func(t *testing.T) {
+		const arraySize = 2
+
+		typeInfo := testTypeInfo{42}
		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}
-		const arraySize = 20
-		array, values :=
createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - // parent array: 1 root metadata slab, 2 data slabs - require.Equal(t, 3, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) - }) + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - t.Run("root metadata slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + // Append element to grand child array + err = gchild.Append(v) + require.NoError(t, err) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Append grand child array to child array + err = child.Append(gchild) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) - }) + // Append child array to parent + err = parentArray.Append(child) + require.NoError(t, err) - t.Run("root metadata slab with composite values, unload composite element from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedValues[i] = child + } - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
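+		// Size accounting for the check below: every child contributes one
+		// inlined child prefix plus one inlined grand child prefix, and each
+		// grand child holds one vSize element, so the parent root data slab costs
+		// arrayRootDataSlabPrefixSize + arraySize*(2*inlinedArrayDataSlabPrefixSize + vSize).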
- // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Test parent slab size with 1 inlined child array + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + vSize*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - verifyArrayLoadedElements(t, array, values) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i] + type arrayInfo struct { + array *Array + valueID ValueID + child *arrayInfo + } - nestedArray, ok := v.(*Array) + children := make([]arrayInfo, arraySize) + + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) + + childArray, ok := e.(*Array) require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - err := storage.Remove(nestedArray.SlabID()) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) + + e, err = childArray.Get(0) require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - expectedValues := values[i+1:] - verifyArrayLoadedElements(t, array, expectedValues) + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + children[i] = arrayInfo{ + array: childArray, + valueID: valueID, + child: &arrayInfo{array: gchildArray, valueID: gValueID}, + } } - }) - t.Run("root metadata slab with composite values, unload composite element from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 7 elements to child array so that inlined child array reaches max inlined size as array element. 
+ for i := 0; i < 7; i++ { + for _, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(i+2), childArray.Count()) - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + require.True(t, childArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyArrayLoadedElements(t, array, values) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i] + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Test inlined grand child slab size (1 element, unchanged) + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) + + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(i+1) + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + // Test parent slab size + expectedParentSize += vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } + + // Add one more element to child array which triggers inlined child array slab (NOT grand child array slab) becomes standalone slab + for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - err := storage.Remove(nestedArray.SlabID()) + err = childArray.Append(v) require.NoError(t, err) - expectedValues := values[:i] - verifyArrayLoadedElements(t, array, expectedValues) - } - }) + require.True(t, gchildArray.Inlined()) + require.False(t, childArray.Inlined()) + require.Equal(t, 2+i, getStoredDeltas(storage)) // There are >1 stored slab because child array is no longer inlined. - t.Run("root metadata slab with composite values, unload composite element in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
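+			// What just fired, sketched under assumptions (the real check is
+			// ArraySlab.Inlinable, whose exact body isn't shown in this hunk):
+			// a child stays inlined only while its encoded size, prefix included,
+			// fits within the max inline size its parent enforces.
+			//
+			//	func (a *ArrayDataSlab) Inlinable(maxInlineSize uint64) bool {
+			//		size := uint32(inlinedArrayDataSlabPrefixSize)
+			//		for _, e := range a.elements {
+			//			size += e.ByteSize()
+			//		}
+			//		return uint64(size) <= maxInlineSize
+			//	}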
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - const arraySize = 20 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+arraySize, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - verifyArrayLoadedElements(t, array, values) + expectedStandaloneSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1) + require.Equal(t, expectedStandaloneSlabSize, childArray.root.ByteSize()) - // Unload composite element in the middle - for _, index := range []int{4, 14} { + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - v := values[index] + require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slab because child array is no longer inlined. - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedParentSize = arrayRootDataSlabPrefixSize + SlabIDStorable(SlabID{}).ByteSize()*2 + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Remove one elements from each child array to trigger child arrays being inlined again. + expectedParentSize = arrayRootDataSlabPrefixSize - copy(values[index:], values[index+1:]) - values = values[:len(values)-1] + for i, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - verifyArrayLoadedElements(t, array, values) - } - }) + _, err = childArray.Remove(childArray.Count() - 1) + require.NoError(t, err) - t.Run("root metadata slab with simple and composite values, unload composite element", func(t *testing.T) { - const arraySize = 20 + require.True(t, gchildArray.Inlined()) + require.True(t, childArray.Inlined()) + require.Equal(t, 2-i, getStoredDeltas(storage)) - // Create an array with composite value at specified index. - for nestedCompositeIndex := 0; nestedCompositeIndex < arraySize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Storage ID is the same bytewise as value ID. 
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - array, values := createArrayWithSimpleAndCompositeValues(t, storage, address, typeInfo, arraySize, nestedCompositeIndex) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - // parent array: 1 root metadata slab, 2 data slabs - // nested composite value element: 1 root data slab for each - require.Equal(t, 3+1, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - verifyArrayLoadedElements(t, array, values) + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1) + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - // Unload composite value - v := values[nestedCompositeIndex].(*Array) + expectedParentSize += expectedInlinedChildSize - err := storage.Remove(v.SlabID()) - require.NoError(t, err) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - verifyArrayLoadedElements(t, array, values) - } - }) + // Remove elements from child array. + elementCount := children[0].array.Count() - t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + for i := uint64(0); i < elementCount-1; i++ { + for _, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + existingStorable, err := childArray.Remove(childArray.Count() - 1) + require.NoError(t, err) + require.Equal(t, NewStringValue(strings.Repeat("a", 9)), existingStorable) - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + require.True(t, gchildArray.Inlined()) + require.True(t, gchildArray.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyArrayLoadedElements(t, array, values) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged - // Unload data slabs from front to back - for i := 0; i < len(metaDataSlab.childrenHeaders); i++ { + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - childHeader := metaDataSlab.childrenHeaders[i] + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + vSize*uint32(childArray.Count()-1) + require.Equal(t, 
expectedInlinedChildSize, childArray.root.ByteSize()) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + // Test parent slab size + expectedParentSize -= vSize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - values = values[childHeader.count:] + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + } - verifyArrayLoadedElements(t, array, values) + for _, child := range children { + require.Equal(t, uint64(1), child.child.array.Count()) + require.Equal(t, uint64(1), child.array.Count()) } + require.Equal(t, uint64(arraySize), parentArray.Count()) }) - t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) - - const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + t.Run("parent is root metadata slab, with four child arrays, each child array has grand child arrays", func(t *testing.T) { + const arraySize = 4 - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - verifyArrayLoadedElements(t, array, values) + v := NewStringValue(strings.Repeat("a", 9)) + vSize := v.size - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + // Create parent array + parentArray, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // Unload data slabs from back to front - for i := len(metaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + expectedValues := make([]Value, arraySize) + for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - childHeader := metaDataSlab.childrenHeaders[i] + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - err := storage.Remove(childHeader.slabID) + // Append grand child array to child array + err = child.Append(gchild) require.NoError(t, err) - values = values[:len(values)-int(childHeader.count)] + // Append child array to parent + err = parentArray.Append(child) + require.NoError(t, err) - verifyArrayLoadedElements(t, array, values) + expectedValues[i] = child } - }) - t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. 
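+		// getStoredDeltas (the helper added near the end of this file) counts
+		// only non-nil deltas: PersistentSlabStorage is assumed to record a
+		// removal as a nil delta, so a child whose standalone slab was just
+		// removed by inlining doesn't count as a stored slab.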
- const arraySize = 30 - array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize) + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*2*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - // parent array (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getArrayMetaDataSlabCount(storage)) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - verifyArrayLoadedElements(t, array, values) + type arrayInfo struct { + array *Array + valueID ValueID + child *arrayInfo + } - metaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + children := make([]arrayInfo, arraySize) - require.True(t, len(metaDataSlab.childrenHeaders) > 2) + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - index := 1 - childHeader := metaDataSlab.childrenHeaders[index] + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - copy(values[metaDataSlab.childrenCountSum[index-1]:], values[metaDataSlab.childrenCountSum[index]:]) - values = values[:array.Count()-uint64(childHeader.count)] + e, err = childArray.Get(0) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - verifyArrayLoadedElements(t, array, values) - }) + gchildArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, gchildArray.Inlined()) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + + gValueID := gchildArray.ValueID() + require.Equal(t, address[:], gValueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], gValueID[slabAddressSize:]) + require.NotEqual(t, valueID[slabAddressSize:], gValueID[slabAddressSize:]) + + children[i] = arrayInfo{ + array: childArray, + valueID: valueID, + child: &arrayInfo{array: gchildArray, valueID: gValueID}, + } + } - t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 6 elements to grand child array so that parent array root slab is metadata slab. 
+		for i := uint32(0); i < 6; i++ {
+			for _, child := range children {
+				childArray := child.array
+				valueID := child.valueID
+				gchildArray := child.child.array
+				gValueID := child.child.valueID
-		const arraySize = 250
-		array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+				err := gchildArray.Append(v)
+				require.NoError(t, err)
+				require.Equal(t, uint64(i+1), gchildArray.Count())
-		// parent array (3 levels): 1 root metadata slab, 2 non-root metadata slabs, n data slabs
-		require.Equal(t, 3, getArrayMetaDataSlabCount(storage))
+				require.True(t, childArray.Inlined())
+				require.True(t, gchildArray.Inlined())
+				require.Equal(t, 1, getStoredDeltas(storage))
-		rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
-		require.True(t, ok)
+				require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-		// Unload non-root metadata slabs from front to back
-		for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
+				require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
-			childHeader := rootMetaDataSlab.childrenHeaders[i]
+				// Test inlined grand child slab size
+				expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + vSize*(i+1)
+				require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-			err := storage.Remove(childHeader.slabID)
+				// Test inlined child slab size
+				expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+				require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize())
+
+				verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+			}
+		}
+
+		// Add one more element to grand child array, which triggers the parent array root slab to become a metadata slab (all elements are still inlined).
+		for _, child := range children {
+			childArray := child.array
+			valueID := child.valueID
+			gchildArray := child.child.array
+			gValueID := child.child.valueID
+
+			err = gchildArray.Append(v)
			require.NoError(t, err)
-			values = values[childHeader.count:]
+			require.True(t, gchildArray.Inlined())
+			require.True(t, childArray.Inlined())
+			require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because parent root slab is a metadata slab.
-			verifyArrayLoadedElements(t, array, values)
-		}
-	})
+			require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab
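+			// Three stored slabs at this point = 1 root metadata slab + 2 data
+			// slabs: the parent's root data slab outgrew the max slab size and
+			// split, while every child and grand child stayed inlined inside the
+			// two data slabs.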
+			require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged
-	t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) {
-		storage := newTestPersistentStorage(t)
+			require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab
+			require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged
-		const arraySize = 250
-		array, values := createArrayWithSimpleValues(t, storage, address, typeInfo, arraySize)
+			// Test inlined grand child slab size
+			expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize
+			require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize())
-		// parent array (3 levels): 1 root metadata slab, 2 child metadata slabs, n data slabs
-		require.Equal(t, 3, getArrayMetaDataSlabCount(storage))
+			expectedInlinedChildSlabSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize
+			require.Equal(t, expectedInlinedChildSlabSize, childArray.root.ByteSize())
-		rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab)
-		require.True(t, ok)
+			verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true)
+		}
-		// Unload non-root metadata slabs from back to front
-		for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- {
+		require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slabs because the parent root slab is a metadata slab with 2 data slabs.
+		require.False(t, parentArray.root.IsData())
-			childHeader := rootMetaDataSlab.childrenHeaders[i]
+		// Add one more element to grand child array, which triggers
+		// - child arrays to become standalone slabs (grand child arrays are still inlined)
+		// - parent array root slab to become a data slab
+		for _, child := range children {
+			childArray := child.array
+			valueID := child.valueID
+			gchildArray := child.child.array
+			gValueID := child.child.valueID
-			err := storage.Remove(childHeader.slabID)
-			require.NoError(t, err)
+			for i := 0; i < 2; i++ {
+				err = gchildArray.Append(v)
+				require.NoError(t, err)
+			}
-			values = values[childHeader.count:]
+			require.True(t, gchildArray.Inlined())
+			require.False(t, childArray.Inlined())
-			verifyArrayLoadedElements(t, array, values)
-		}
-	})
+			expectedSlabID := valueIDToSlabID(valueID)
+			require.Equal(t, expectedSlabID, childArray.SlabID()) // Slab ID is the same bytewise as value ID.
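+			// Once a child is standalone, the parent stores a fixed-size
+			// SlabIDStorable reference in its place, so the parent's size becomes
+			// independent of the child's contents (see the SlabIDStorable-based
+			// expectedParentSize computations elsewhere in this test file).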
+ require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - t.Run("root metadata slab with composite values, unload random composite value", func(t *testing.T) { + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - storage := newTestPersistentStorage(t) + // Test standalone grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + expectedStandaloneChildSlabSize := arrayRootDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedStandaloneChildSlabSize, childArray.root.ByteSize()) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - verifyArrayLoadedElements(t, array, values) + // Parent array has one root data slab, 4 grand child array with standalone root data slab. + require.Equal(t, 1+arraySize, getStoredDeltas(storage)) + require.True(t, parentArray.root.IsData()) - r := newRand(t) + // Remove elements from grand child array to trigger child array inlined again. + for _, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - // Unload random composite element - for len(values) > 0 { + for i := 0; i < 2; i++ { + _, err = gchildArray.Remove(0) + require.NoError(t, err) + } - i := r.Intn(len(values)) + require.True(t, gchildArray.Inlined()) + require.True(t, childArray.Inlined()) - v := values[i] + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childArray.ValueID()) // Value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildArray.ValueID()) // Value ID is unchanged - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - copy(values[i:], values[i+1:]) - values = values[:len(values)-1] + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) - verifyArrayLoadedElements(t, array, values) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - }) - t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + // Parent array has 1 metadata slab, and two data slab, all child and grand child arrays are inlined. + require.Equal(t, 3, getStoredDeltas(storage)) + require.False(t, parentArray.root.IsData()) - storage := newTestPersistentStorage(t) + // Remove elements from grand child array. 
+ elementCount := children[0].child.array.Count() - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + for i := uint64(0); i < elementCount; i++ { + for _, child := range children { + childArray := child.array + valueID := child.valueID + gchildArray := child.child.array + gValueID := child.child.valueID - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + existingStorable, err := gchildArray.Remove(0) + require.NoError(t, err) + require.Equal(t, v, existingStorable) - verifyArrayLoadedElements(t, array, values) + require.True(t, gchildArray.Inlined()) + require.True(t, gchildArray.Inlined()) - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) + require.Equal(t, valueID, childArray.ValueID()) // value ID is unchanged - type slabInfo struct { - id SlabID - startIndex int - count int - } + require.Equal(t, SlabIDUndefined, gchildArray.SlabID()) + require.Equal(t, gValueID, gchildArray.ValueID()) // value ID is unchanged - count := 0 - var dataSlabInfos []*slabInfo - for _, mheader := range rootMetaDataSlab.childrenHeaders { - nonrootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) - require.True(t, ok) + // Test inlined grand child slab size + expectedInlinedGrandChildSize := inlinedArrayDataSlabPrefixSize + uint32(gchildArray.Count())*vSize + require.Equal(t, expectedInlinedGrandChildSize, gchildArray.root.ByteSize()) - for _, h := range nonrootMetaDataSlab.childrenHeaders { - dataSlabInfo := &slabInfo{id: h.slabID, startIndex: count, count: int(h.count)} - dataSlabInfos = append(dataSlabInfos, dataSlabInfo) - count += int(h.count) + // Test inlined child slab size + expectedInlinedChildSize := inlinedArrayDataSlabPrefixSize + expectedInlinedGrandChildSize + require.Equal(t, expectedInlinedChildSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } } - r := newRand(t) + for _, child := range children { + require.Equal(t, uint64(0), child.child.array.Count()) + require.Equal(t, uint64(1), child.array.Count()) + } + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.Equal(t, 1, getStoredDeltas(storage)) - // Unload random data slab. - for len(dataSlabInfos) > 0 { - indexToUnload := r.Intn(len(dataSlabInfos)) + expectedParentSize = uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize*2 + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) + }) +} - slabInfoToUnload := dataSlabInfos[indexToUnload] +func TestChildArrayWhenParentArrayIsModified(t *testing.T) { - // Update startIndex for all data slabs after indexToUnload. - for i := indexToUnload + 1; i < len(dataSlabInfos); i++ { - dataSlabInfos[i].startIndex -= slabInfoToUnload.count - } + const arraySize = 2 - // Remove slabInfo to be unloaded from dataSlabInfos. - copy(dataSlabInfos[indexToUnload:], dataSlabInfos[indexToUnload+1:]) - dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - err := storage.Remove(slabInfoToUnload.id) - require.NoError(t, err) + // Create an array with empty child array as element. 
+ parentArray, expectedValues := createArrayWithEmptyChildArray(t, storage, address, typeInfo, arraySize) - copy(values[slabInfoToUnload.startIndex:], values[slabInfoToUnload.startIndex+slabInfoToUnload.count:]) - values = values[:len(values)-slabInfoToUnload.count] + require.Equal(t, uint64(arraySize), parentArray.Count()) + require.True(t, parentArray.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child array is inlined. - verifyArrayLoadedElements(t, array, values) - } + // Test parent slab size with empty inlined child arrays + expectedParentSize := uint32(arrayRootDataSlabPrefixSize) + uint32(inlinedArrayDataSlabPrefixSize)*arraySize + require.Equal(t, expectedParentSize, parentArray.root.ByteSize()) - require.Equal(t, 0, len(values)) - }) + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) - t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + children := make([]struct { + array *Array + valueID ValueID + }, arraySize) - storage := newTestPersistentStorage(t) + for i := 0; i < arraySize; i++ { + e, err := parentArray.Get(uint64(i)) + require.NoError(t, err) + require.Equal(t, 1, getStoredDeltas(storage)) - const arraySize = 500 - array, values := createArrayWithCompositeValues(t, storage, address, typeInfo, arraySize) + childArray, ok := e.(*Array) + require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) - // parent array (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+arraySize) - require.True(t, getArrayMetaDataSlabCount(storage) > 1) + valueID := childArray.ValueID() + require.Equal(t, address[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) - verifyArrayLoadedElements(t, array, values) + children[i].array = childArray + children[i].valueID = valueID + } + + t.Run("insert elements in parent array", func(t *testing.T) { + // insert value at index 0, so all child array indexes are moved by +1 + v := Uint64Value(0) + err := parentArray.Insert(0, v) + require.NoError(t, err) - type slabInfo struct { - id SlabID - startIndex int - count int - children []*slabInfo - } + expectedValues = append(expectedValues, nil) + copy(expectedValues[1:], expectedValues) + expectedValues[0] = v - rootMetaDataSlab, ok := array.root.(*ArrayMetaDataSlab) - require.True(t, ok) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - var dataSlabCount, metadataSlabCount int - nonrootMetadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) - for i, mheader := range rootMetaDataSlab.childrenHeaders { + v := Uint64Value(i) + vSize := v.ByteSize() - nonrootMetadataSlabInfo := &slabInfo{ - id: mheader.slabID, - startIndex: metadataSlabCount, - count: int(mheader.count), - } - metadataSlabCount += int(mheader.count) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(1), childArray.Count()) - nonrootMetadataSlab, ok := storage.deltas[mheader.slabID].(*ArrayMetaDataSlab) - require.True(t, ok) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - children := make([]*slabInfo, 
len(nonrootMetadataSlab.childrenHeaders)) - for i, h := range nonrootMetadataSlab.childrenHeaders { - children[i] = &slabInfo{ - id: h.slabID, - startIndex: dataSlabCount, - count: int(h.count), - } - dataSlabCount += int(h.count) - } + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - nonrootMetadataSlabInfo.children = children - nonrootMetadataSlabInfos[i] = nonrootMetadataSlabInfo + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - r := newRand(t) + // insert value at index 2, so only second child array index is moved by +1 + v = Uint64Value(2) + err = parentArray.Insert(2, v) + require.NoError(t, err) - const ( - metadataSlabType int = iota - dataSlabType - maxSlabType - ) + expectedValues = append(expectedValues, nil) + copy(expectedValues[3:], expectedValues[2:]) + expectedValues[2] = v - for len(nonrootMetadataSlabInfos) > 0 { + for i, child := range children { + childArray := child.array + childValueID := child.valueID - var slabInfoToBeRemoved *slabInfo - var isLastSlab bool + v := Uint64Value(i) + vSize := v.ByteSize() - // Unload random metadata or data slab. - switch r.Intn(maxSlabType) { + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(2), childArray.Count()) - case metadataSlabType: - // Unload metadata slab at random index. - metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - isLastSlab = metadataSlabIndex == len(nonrootMetadataSlabInfos)-1 + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - slabInfoToBeRemoved = nonrootMetadataSlabInfos[metadataSlabIndex] + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - count := slabInfoToBeRemoved.count + // insert value at index 4, so none of child array indexes are affected. + v = Uint64Value(4) + err = parentArray.Insert(4, v) + require.NoError(t, err) - // Update startIndex for subsequence metadata and data slabs. - for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { - nonrootMetadataSlabInfos[i].startIndex -= count + expectedValues = append(expectedValues, nil) + expectedValues[4] = v - for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { - nonrootMetadataSlabInfos[i].children[j].startIndex -= count - } - } + for i, child := range children { + childArray := child.array + childValueID := child.valueID - copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) - nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] + v := Uint64Value(i) + vSize := v.ByteSize() - case dataSlabType: - // Unload data slab at randome index. 
- metadataSlabIndex := r.Intn(len(nonrootMetadataSlabInfos)) + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(3), childArray.Count()) - metaSlabInfo := nonrootMetadataSlabInfos[metadataSlabIndex] + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - dataSlabIndex := r.Intn(len(metaSlabInfo.children)) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - slabInfoToBeRemoved = metaSlabInfo.children[dataSlabIndex] + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + }) - isLastSlab = (metadataSlabIndex == len(nonrootMetadataSlabInfos)-1) && - (dataSlabIndex == len(metaSlabInfo.children)-1) + t.Run("remove elements from parent array", func(t *testing.T) { + // remove value at index 0, so all child array indexes are moved by -1. + existingStorable, err := parentArray.Remove(0) + require.NoError(t, err) + require.Equal(t, Uint64Value(0), existingStorable) - count := slabInfoToBeRemoved.count + copy(expectedValues, expectedValues[1:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // Update startIndex for subsequence data slabs. - for i := dataSlabIndex + 1; i < len(metaSlabInfo.children); i++ { - metaSlabInfo.children[i].startIndex -= count - } + for i, child := range children { + childArray := child.array + childValueID := child.valueID - copy(metaSlabInfo.children[dataSlabIndex:], metaSlabInfo.children[dataSlabIndex+1:]) - metaSlabInfo.children = metaSlabInfo.children[:len(metaSlabInfo.children)-1] + v := Uint64Value(i) + vSize := v.ByteSize() - metaSlabInfo.count -= count + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(4), childArray.Count()) - // Update startIndex for all subsequence metadata slabs. 
- for i := metadataSlabIndex + 1; i < len(nonrootMetadataSlabInfos); i++ { - nonrootMetadataSlabInfos[i].startIndex -= count + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - for j := 0; j < len(nonrootMetadataSlabInfos[i].children); j++ { - nonrootMetadataSlabInfos[i].children[j].startIndex -= count - } - } + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) - if len(metaSlabInfo.children) == 0 { - copy(nonrootMetadataSlabInfos[metadataSlabIndex:], nonrootMetadataSlabInfos[metadataSlabIndex+1:]) - nonrootMetadataSlabInfos = nonrootMetadataSlabInfos[:len(nonrootMetadataSlabInfos)-1] - } - } + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } - err := storage.Remove(slabInfoToBeRemoved.id) + // Remove value at index 1, so only second child array index is moved by -1 + existingStorable, err = parentArray.Remove(1) + require.NoError(t, err) + require.Equal(t, Uint64Value(2), existingStorable) + + copy(expectedValues[1:], expectedValues[2:]) + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] + + for i, child := range children { + childArray := child.array + childValueID := child.valueID + + v := Uint64Value(i) + vSize := v.ByteSize() + + err := childArray.Append(v) require.NoError(t, err) + require.Equal(t, uint64(5), childArray.Count()) - if isLastSlab { - values = values[:slabInfoToBeRemoved.startIndex] - } else { - copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) - values = values[:len(values)-slabInfoToBeRemoved.count] - } + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged - verifyArrayLoadedElements(t, array, values) + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) } - require.Equal(t, 0, len(values)) - }) -} + // Remove value at index 2 (last element), so none of child array indexes are affected. 
+ existingStorable, err = parentArray.Remove(2) + require.NoError(t, err) + require.Equal(t, Uint64Value(4), existingStorable) -func createArrayWithSimpleValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - arraySize int, -) (*Array, []Value) { + expectedValues[len(expectedValues)-1] = nil + expectedValues = expectedValues[:len(expectedValues)-1] - // Create parent array - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + for i, child := range children { + childArray := child.array + childValueID := child.valueID - values := make([]Value, arraySize) - r := rune('a') - for i := 0; i < arraySize; i++ { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) + v := Uint64Value(i) + vSize := v.ByteSize() - err := array.Append(values[i]) - require.NoError(t, err) - } + err := childArray.Append(v) + require.NoError(t, err) + require.Equal(t, uint64(6), childArray.Count()) - return array, values + require.True(t, childArray.Inlined()) + require.Equal(t, SlabIDUndefined, childArray.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childArray.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedInlinedSize := inlinedArrayDataSlabPrefixSize + uint32(childArray.Count())*vSize + require.Equal(t, expectedInlinedSize, childArray.root.ByteSize()) + + verifyArray(t, storage, typeInfo, address, parentArray, expectedValues, true) + } + }) } -func createArrayWithCompositeValues( +func createArrayWithEmptyChildArray( t *testing.T, storage SlabStorage, address Address, @@ -3573,143 +6143,62 @@ func createArrayWithCompositeValues( expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { - // Create nested array - nested, err := NewArray(storage, address, typeInfo) + // Create child array + child, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - err = nested.Append(Uint64Value(i)) + // Append child array to parent + err = array.Append(child) require.NoError(t, err) - expectedValues[i] = nested - - // Append nested array to parent - err = array.Append(nested) - require.NoError(t, err) + expectedValues[i] = child } return array, expectedValues } -func createArrayWithSimpleAndCompositeValues( +func createArrayWithEmpty2LevelChildArray( t *testing.T, storage SlabStorage, address Address, typeInfo TypeInfo, arraySize int, - compositeValueIndex int, ) (*Array, []Value) { - require.True(t, compositeValueIndex < arraySize) + // Create parent array array, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - values := make([]Value, arraySize) - r := 'a' + expectedValues := make([]Value, arraySize) for i := 0; i < arraySize; i++ { + // Create child array + child, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - err = a.Append(Uint64Value(i)) - require.NoError(t, err) + // Create grand child array + gchild, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - values[i] = a - } else { - values[i] = NewStringValue(strings.Repeat(string(r), 20)) - r++ - } + // Append grand child array to child array + err = child.Append(gchild) + require.NoError(t, err) - err = array.Append(values[i]) + // Append child array to parent + err = array.Append(child) require.NoError(t, err) - } - return array, values -} + expectedValues[i] = child + } -func 
verifyArrayLoadedElements(t *testing.T, array *Array, expectedValues []Value) { - i := 0 - err := array.IterateLoadedValues(func(v Value) (bool, error) { - require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i], v) - i++ - return true, nil - }) - require.NoError(t, err) - require.Equal(t, len(expectedValues), i) + return array, expectedValues } -func getArrayMetaDataSlabCount(storage *PersistentSlabStorage) int { - var counter int +func getStoredDeltas(storage *PersistentSlabStorage) int { + count := 0 for _, slab := range storage.deltas { - if _, ok := slab.(*ArrayMetaDataSlab); ok { - counter++ + if slab != nil { + count++ } } - return counter -} - -func TestArrayID(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - sid := array.SlabID() - id := array.ValueID() - - require.Equal(t, sid.address[:], id[:8]) - require.Equal(t, sid.index[:], id[8:]) -} - -func TestSlabSizeWhenResettingMutableStorable(t *testing.T) { - const ( - arraySize = 3 - initialStorableSize = 1 - mutatedStorableSize = 5 - ) - - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) - - values := make([]*mutableValue, arraySize) - for i := uint64(0); i < arraySize; i++ { - v := newMutableValue(initialStorableSize) - values[i] = v - - err := array.Append(v) - require.NoError(t, err) - } - - require.True(t, array.root.IsData()) - - expectedArrayRootDataSlabSize := arrayRootDataSlabPrefixSize + initialStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) - - for i := uint64(0); i < arraySize; i++ { - mv := values[i] - mv.updateStorableSize(mutatedStorableSize) - - existingStorable, err := array.Set(i, mv) - require.NoError(t, err) - require.NotNil(t, existingStorable) - } - - require.True(t, array.root.IsData()) - - expectedArrayRootDataSlabSize = arrayRootDataSlabPrefixSize + mutatedStorableSize*arraySize - require.Equal(t, uint32(expectedArrayRootDataSlabSize), array.root.ByteSize()) - - err = ValidArray(array, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) + return count } diff --git a/basicarray.go b/basicarray.go index b5267e4c..143bec35 100644 --- a/basicarray.go +++ b/basicarray.go @@ -76,7 +76,7 @@ func newBasicArrayDataSlabFromData( ) } - cborDec := decMode.NewByteStreamDecoder(data[2:]) + cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) elemCount, err := cborDec.DecodeArrayHead() if err != nil { @@ -85,7 +85,7 @@ func newBasicArrayDataSlabFromData( elements := make([]Storable, elemCount) for i := 0; i < int(elemCount); i++ { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode array element") @@ -101,10 +101,17 @@ func newBasicArrayDataSlabFromData( func (a *BasicArrayDataSlab) Encode(enc *Encoder) error { - flag := maskBasicArray | maskSlabRoot + const version = 1 + + h, err := newArraySlabHead(version, slabBasicArray) + if err != nil { + return NewEncodingError(err) + } + + h.setRoot() // Encode flag - _, err := enc.Write([]byte{0x0, flag}) + _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) } diff --git a/cmd/main/main.go b/cmd/main/main.go index 3e0cf470..3b2eaebd 100644 --- a/cmd/main/main.go +++ b/cmd/main/main.go @@ -77,6 +77,14 @@ type testTypeInfo struct{} var _ atree.TypeInfo = testTypeInfo{} +func (testTypeInfo) IsComposite() bool { + return false +} + +func (i testTypeInfo) ID() string { + return fmt.Sprintf("uint64(%d)", i) +} + func (testTypeInfo) Encode(e *cbor.StreamEncoder) error { return e.EncodeUint8(42) } @@ -86,7 +94,7 @@ func (i testTypeInfo) Equal(other atree.TypeInfo) bool { return ok } -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) { tagNumber, err := dec.DecodeTagNumber() if err != nil { return nil, err diff --git a/cmd/stress/storable.go b/cmd/stress/storable.go index b3fba90a..a2bdf1da 100644 --- a/cmd/stress/storable.go +++ b/cmd/stress/storable.go @@ -413,7 +413,7 @@ func (v StringValue) String() string { return v.str } -func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID) (atree.Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, _ atree.SlabID, _ []atree.ExtraData) (atree.Storable, error) { t, err := dec.NextType() if err != nil { return nil, err diff --git a/cmd/stress/typeinfo.go b/cmd/stress/typeinfo.go index 4618dc12..b14c212b 100644 --- a/cmd/stress/typeinfo.go +++ b/cmd/stress/typeinfo.go @@ -19,6 +19,8 @@ package main import ( + "fmt" + "github.com/onflow/atree" "github.com/fxamacker/cbor/v2" @@ -30,6 +32,14 @@ type testTypeInfo struct { var _ atree.TypeInfo = testTypeInfo{} +func (i testTypeInfo) IsComposite() bool { + return false +} + +func (i testTypeInfo) ID() string { + return fmt.Sprintf("uint64(%d)", i) +} + func (i testTypeInfo) Encode(e *cbor.StreamEncoder) error { return e.EncodeUint64(i.value) } diff --git a/encode.go b/encode.go index c88fa3a8..5f46505c 100644 --- a/encode.go +++ b/encode.go @@ -30,19 +30,45 @@ type Encoder struct { io.Writer CBOR *cbor.StreamEncoder Scratch [64]byte + encMode cbor.EncMode } func NewEncoder(w io.Writer, encMode cbor.EncMode) *Encoder { streamEncoder := encMode.NewStreamEncoder(w) return &Encoder{ - Writer: w, - CBOR: streamEncoder, + Writer: w, + CBOR: streamEncoder, + encMode: encMode, } } +// encodeStorableAsElement encodes storable as Array or OrderedMap element. +// Storable is encode as an inlined ArrayDataSlab or MapDataSlab if it is ArrayDataSlab or MapDataSlab. +func encodeStorableAsElement(enc *Encoder, storable Storable, inlinedTypeInfo *inlinedExtraData) error { + + switch storable := storable.(type) { + + case *ArrayDataSlab: + return storable.encodeAsInlined(enc, inlinedTypeInfo) + + case *MapDataSlab: + return storable.encodeAsInlined(enc, inlinedTypeInfo) + + default: + err := storable.Encode(enc) + if err != nil { + // Wrap err as external error (if needed) because err is returned by Storable interface. 
+ return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") + } + } + + return nil +} + type StorableDecoder func( decoder *cbor.StreamDecoder, storableSlabID SlabID, + inlinedExtraData []ExtraData, ) ( Storable, error, @@ -101,7 +127,7 @@ func DecodeSlab( case slabStorable: cborDec := decMode.NewByteStreamDecoder(data[versionAndFlagSize:]) - storable, err := decodeStorable(cborDec, id) + storable, err := decodeStorable(cborDec, id, nil) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode slab storable") @@ -116,7 +142,6 @@ func DecodeSlab( } } -// TODO: make it inline func GetUintCBORSize(n uint64) uint32 { if n <= 23 { return 1 diff --git a/map.go b/map.go index 7d9eabf8..f9f6b596 100644 --- a/map.go +++ b/map.go @@ -19,6 +19,7 @@ package atree import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -83,6 +84,14 @@ const ( // CircleHash64fx and SipHash might use this const as part of their // 128-bit seed (when they don't use 64-bit -> 128-bit seed expansion func). typicalRandomConstant = uint64(0x1BD11BDAA9FC1A22) // DO NOT MODIFY + + // inlined map data slab prefix size: + // tag number (2 bytes) + + // 3-element array head (1 byte) + + // extra data ref index (2 bytes) [0, 255] + + // value index head (1 byte) + + // value index (8 bytes) + inlinedMapDataSlabPrefixSize = 2 + 1 + 2 + 1 + 8 ) // MaxCollisionLimitPerDigest is the noncryptographic hash collision limit @@ -134,7 +143,7 @@ type element interface { key Value, ) (MapKey, MapValue, element, error) - Encode(*Encoder) error + Encode(*Encoder, *inlinedExtraData) error hasPointer() bool @@ -174,7 +183,8 @@ type elements interface { Element(int) (element, error) - Encode(*Encoder) error + Encode(*Encoder, *inlinedExtraData) error + EncodeCompositeValues(*Encoder, []MapKey, *inlinedExtraData) error hasPointer() bool @@ -239,6 +249,8 @@ type MapExtraData struct { Seed uint64 } +var _ ExtraData = &MapExtraData{} + // MapDataSlab is leaf node, implementing MapSlab. // anySize is true for data slab that isn't restricted by size requirement. type MapDataSlab struct { @@ -253,9 +265,11 @@ type MapDataSlab struct { anySize bool collisionGroup bool + inlined bool } var _ MapSlab = &MapDataSlab{} +var _ Storable = &MapDataSlab{} // MapMetaDataSlab is internal node, implementing MapSlab. 
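+// A metadata slab is never inlined: MapMetaDataSlab.Inlined() always reports
+// false and Inlinable() always returns false (see further below in this diff).
+// Only a root MapDataSlab can be stored inlined in its parent slab.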
type MapMetaDataSlab struct { @@ -292,15 +306,20 @@ type MapSlab interface { SetExtraData(*MapExtraData) PopIterate(SlabStorage, MapPopIterationFunc) error + + Inlined() bool + Inlinable(maxInlineSize uint64) bool } type OrderedMap struct { Storage SlabStorage root MapSlab digesterBuilder DigesterBuilder + parentUpdater parentUpdater } var _ Value = &OrderedMap{} +var _ valueNotifier = &OrderedMap{} const mapExtraDataLength = 3 @@ -365,6 +384,10 @@ func newMapExtraData(dec *cbor.StreamDecoder, decodeTypeInfo TypeInfoDecoder) (* }, nil } +func (m *MapExtraData) isExtraData() bool { + return true +} + // Encode encodes extra data as CBOR array: // // [type info, count, seed] @@ -399,7 +422,7 @@ func (m *MapExtraData) Encode(enc *Encoder) error { return nil } -func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (element, error) { +func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (element, error) { nt, err := cborDec.NextType() if err != nil { return nil, NewDecodingError(err) @@ -408,7 +431,7 @@ func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDeco switch nt { case cbor.ArrayType: // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). - return newSingleElementFromData(cborDec, decodeStorable) + return newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) case cbor.TagType: tagNum, err := cborDec.DecodeTagNumber() @@ -418,10 +441,10 @@ func newElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDeco switch tagNum { case CBORTagInlineCollisionGroup: // Don't need to wrap error as external error because err is already categorized by newInlineCollisionGroupFromData(). - return newInlineCollisionGroupFromData(cborDec, decodeStorable) + return newInlineCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) case CBORTagExternalCollisionGroup: // Don't need to wrap error as external error because err is already categorized by newExternalCollisionGroupFromData(). - return newExternalCollisionGroupFromData(cborDec, decodeStorable) + return newExternalCollisionGroupFromData(cborDec, decodeStorable, slabID, inlinedExtraData) default: return nil, NewDecodingError(fmt.Errorf("failed to decode element: unrecognized tag number %d", tagNum)) } @@ -452,7 +475,7 @@ func newSingleElement(storage SlabStorage, address Address, key Value, value Val }, nil } -func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*singleElement, error) { +func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*singleElement, error) { elemCount, err := cborDec.DecodeArrayHead() if err != nil { return nil, NewDecodingError(err) @@ -462,13 +485,13 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab return nil, NewDecodingError(fmt.Errorf("failed to decode single element: expect array of 2 elements, got %d elements", elemCount)) } - key, err := decodeStorable(cborDec, SlabIDUndefined) + key, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") } - value, err := decodeStorable(cborDec, SlabIDUndefined) + value, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode value's storable") @@ -484,7 +507,7 @@ func newSingleElementFromData(cborDec *cbor.StreamDecoder, decodeStorable Storab // Encode encodes singleElement to the given encoder. // // CBOR encoded array of 2 elements (key, value). -func (e *singleElement) Encode(enc *Encoder) error { +func (e *singleElement) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { // Encode CBOR array head for 2 elements err := enc.CBOR.EncodeRawBytes([]byte{0x82}) @@ -500,7 +523,7 @@ func (e *singleElement) Encode(enc *Encoder) error { } // Encode value - err = e.value.Encode(enc) + err = encodeStorableAsElement(enc, e.value, inlinedTypeInfo) if err != nil { // Wrap err as external error (if needed) because err is returned by Storable interface. return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode map value") @@ -648,8 +671,8 @@ func (e *singleElement) String() string { return fmt.Sprintf("%s:%s", e.key, e.value) } -func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*inlineCollisionGroup, error) { - elements, err := newElementsFromData(cborDec, decodeStorable) +func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*inlineCollisionGroup, error) { + elements, err := newElementsFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementsFromData(). return nil, err @@ -661,7 +684,7 @@ func newInlineCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable // Encode encodes inlineCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagInlineCollisionGroup, content: elements) -func (e *inlineCollisionGroup) Encode(enc *Encoder) error { +func (e *inlineCollisionGroup) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagInlineCollisionGroup @@ -671,7 +694,7 @@ func (e *inlineCollisionGroup) Encode(enc *Encoder) error { return NewEncodingError(err) } - err = e.elements.Encode(enc) + err = e.elements.Encode(enc, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). return err @@ -829,9 +852,9 @@ func (e *inlineCollisionGroup) String() string { return "inline[" + e.elements.String() + "]" } -func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (*externalCollisionGroup, error) { +func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (*externalCollisionGroup, error) { - storable, err := decodeStorable(cborDec, SlabIDUndefined) + storable, err := decodeStorable(cborDec, slabID, inlinedExtraData) if err != nil { // Wrap err as external error (if needed) because err is returned by StorableDecoder callback. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode Storable") @@ -851,7 +874,7 @@ func newExternalCollisionGroupFromData(cborDec *cbor.StreamDecoder, decodeStorab // Encode encodes externalCollisionGroup to the given encoder. // // CBOR tag (number: CBORTagExternalCollisionGroup, content: slab ID) -func (e *externalCollisionGroup) Encode(enc *Encoder) error { +func (e *externalCollisionGroup) Encode(enc *Encoder, _ *inlinedExtraData) error { err := enc.CBOR.EncodeRawBytes([]byte{ // tag number CBORTagExternalCollisionGroup 0xd8, CBORTagExternalCollisionGroup, @@ -1029,7 +1052,7 @@ func (e *externalCollisionGroup) String() string { return fmt.Sprintf("external(%s)", e.slabID) } -func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder) (elements, error) { +func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDecoder, slabID SlabID, inlinedExtraData []ExtraData) (elements, error) { arrayCount, err := cborDec.DecodeArrayHead() if err != nil { @@ -1076,7 +1099,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(singleElementsPrefixSize) elems := make([]*singleElement, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newSingleElementFromData(cborDec, decodeStorable) + elem, err := newSingleElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newSingleElementFromData(). return nil, err @@ -1102,7 +1125,7 @@ func newElementsFromData(cborDec *cbor.StreamDecoder, decodeStorable StorableDec size := uint32(hkeyElementsPrefixSize) elems := make([]element, elemCount) for i := 0; i < int(elemCount); i++ { - elem, err := newElementFromData(cborDec, decodeStorable) + elem, err := newElementFromData(cborDec, decodeStorable, slabID, inlinedExtraData) if err != nil { // Don't need to wrap error as external error because err is already categorized by newElementFromData(). return nil, err @@ -1146,7 +1169,7 @@ func newHkeyElementsWithElement(level uint, hkey Digest, elem element) *hkeyElem // 1: hkeys (byte string) // 2: elements (array) // ] -func (e *hkeyElements) Encode(enc *Encoder) error { +func (e *hkeyElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error { if e.level > maxDigestLevel { return NewFatalError(fmt.Errorf("hash level %d exceeds max digest level %d", e.level, maxDigestLevel)) @@ -1200,7 +1223,7 @@ func (e *hkeyElements) Encode(enc *Encoder) error { // Encode each element for _, e := range e.elems { - err = e.Encode(enc) + err = e.Encode(enc, inlinedTypeInfo) if err != nil { // Don't need to wrap error as external error because err is already categorized by element.Encode(). return err @@ -1216,6 +1239,70 @@ func (e *hkeyElements) Encode(enc *Encoder) error { return nil } +// EncodeCompositeValues encodes hkeyElements as an array of values ordered by orderedKeys. +// Level is not encoded because it is always 0. Digests are not encoded because +// they are encoded with composite keys in the composite extra data section. 
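+//
+// For example (illustrative only): with orderedKeys ["x", "y"], a composite
+// {"y": 2, "x": 1} encodes as the CBOR array [1, 2]; keys and digests are
+// omitted because they are recoverable from the shared composite extra data.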
+func (e *hkeyElements) EncodeCompositeValues(enc *Encoder, orderedKeys []MapKey, inlinedTypeInfo *inlinedExtraData) error {
+	if e.level != 0 {
+		return NewEncodingError(fmt.Errorf("hash level must be 0 to be encoded as composite, got %d", e.level))
+	}
+
+	if len(e.elems) != len(orderedKeys) {
+		return NewEncodingError(fmt.Errorf("number of elements %d is different from number of elements in composite extra data %d", len(e.elems), len(orderedKeys)))
+	}
+
+	var err error
+
+	err = enc.CBOR.EncodeArrayHead(uint64(len(orderedKeys)))
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	keyIndexes := make([]int, len(e.elems))
+	for i := 0; i < len(e.elems); i++ {
+		keyIndexes[i] = i
+	}
+
+	// Encode values in the same order as orderedKeys.
+	for i, k := range orderedKeys {
+		key, ok := k.(EquatableStorable)
+		if !ok {
+			return NewEncodingError(fmt.Errorf("composite keys must implement EquatableStorable"))
+		}
+
+		found := false
+		for j := i; j < len(keyIndexes); j++ {
+			index := keyIndexes[j]
+			se, ok := e.elems[index].(*singleElement)
+			if !ok {
+				return NewEncodingError(fmt.Errorf("composite element must not have collision"))
+			}
+			if key.Equal(se.key) {
+				found = true
+				keyIndexes[i], keyIndexes[j] = keyIndexes[j], keyIndexes[i]
+
+				err = encodeStorableAsElement(enc, se.value, inlinedTypeInfo)
+				if err != nil {
+					// Don't need to wrap error as external error because err is already categorized by encodeStorableAsElement().
+					return err
+				}
+
+				break
+			}
+		}
+		if !found {
+			return NewEncodingError(fmt.Errorf("failed to find key %v", k))
+		}
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
 func (e *hkeyElements) Get(storage SlabStorage, digester Digester, level uint, hkey Digest, comparator ValueComparator, key Value) (MapValue, error) {
 
 	if level >= digester.Levels() {
@@ -1797,7 +1884,7 @@ func newSingleElementsWithElement(level uint, elem *singleElement) *singleElemen
 // 1: hkeys (0 length byte string)
 // 2: elements (array)
 // ]
-func (e *singleElements) Encode(enc *Encoder) error {
+func (e *singleElements) Encode(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
 
 	if e.level > maxDigestLevel {
 		return NewFatalError(fmt.Errorf("digest level %d exceeds max digest level %d", e.level, maxDigestLevel))
@@ -1828,7 +1915,7 @@ func (e *singleElements) Encode(enc *Encoder) error {
 
 	// Encode each element
 	for _, e := range e.elems {
-		err = e.Encode(enc)
+		err = e.Encode(enc, inlinedTypeInfo)
 		if err != nil {
 			// Don't need to wrap error as external error because err is already categorized by singleElement.Encode().
 			return err
@@ -1844,6 +1931,10 @@ func (e *singleElements) Encode(enc *Encoder) error {
 	return nil
 }
 
+func (e *singleElements) EncodeCompositeValues(_ *Encoder, _ []MapKey, _ *inlinedExtraData) error {
+	return NewEncodingError(fmt.Errorf("singleElements can't be encoded as composite values"))
+}
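+
+// NOTE: the error above is effectively unreachable for well-formed maps:
+// singleElements only exist as the collision fallback at the maximum digest
+// level, and canBeEncodedAsComposite() (below) returns false for any map
+// containing collision elements.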
+
 func (e *singleElements) Get(storage SlabStorage, digester Digester, level uint, _ Digest, comparator ValueComparator, key Value) (MapValue, error) {
 
 	if level != digester.Levels() {
@@ -2147,7 +2238,7 @@ func newMapDataSlabFromDataV0(
 
 	// Decode elements
 	cborDec := decMode.NewByteStreamDecoder(data)
-	elements, err := newElementsFromData(cborDec, decodeStorable)
+	elements, err := newElementsFromData(cborDec, decodeStorable, id, nil)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by newElementsFromData().
 		return nil, err
@@ -2179,21 +2270,22 @@ func newMapDataSlabFromDataV0(
 //
 // Root DataSlab Header:
 //
-// +-------------------------------+------------+
-// | slab version + flag (2 bytes) | extra data |
-// +-------------------------------+------------+
+// +-------------------------------+------------+---------------------------------+
+// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) |
+// +-------------------------------+------------+---------------------------------+
 //
-// Non-root DataSlab Header (18 bytes):
+// Non-root DataSlab Header:
 //
-// +-------------------------------+-----------------------------+
-// | slab version + flag (2 bytes) | next sib slab ID (16 bytes) |
-// +-------------------------------+-----------------------------+
+// +-------------------------------+---------------------------------+-----------------------------+
+// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) |
+// +-------------------------------+---------------------------------+-----------------------------+
 //
 // Content:
 //
 // CBOR encoded elements
 //
 // See MapExtraData.Encode() for extra data section format.
+// See InlinedExtraData.Encode() for inlined extra data section format.
 // See hkeyElements.Encode() and singleElements.Encode() for elements section format.
 func newMapDataSlabFromDataV1(
 	id SlabID,
 	h head,
@@ -2208,6 +2300,7 @@ func newMapDataSlabFromDataV1(
 ) {
 	var err error
 	var extraData *MapExtraData
+	var inlinedExtraData []ExtraData
 	var next SlabID
 
 	// Decode extra data
@@ -2219,7 +2312,21 @@ func newMapDataSlabFromDataV1(
 		}
 	}
 
-	// Decode next slab ID
+	// Decode inlined extra data
+	if h.hasInlinedSlabs() {
+		inlinedExtraData, data, err = newInlinedExtraDataFromData(
+			data,
+			decMode,
+			decodeStorable,
+			decodeTypeInfo,
+		)
+		if err != nil {
+			// Don't need to wrap error as external error because err is already categorized by newInlinedExtraDataFromData().
+			return nil, err
+		}
+	}
+
+	// Decode next slab ID for non-root slab
 	if h.hasNextSlabID() {
 		if len(data) < slabIDSize {
 			return nil, NewDecodingErrorf("data is too short for map data slab")
@@ -2236,7 +2343,7 @@ func newMapDataSlabFromDataV1(
 
 	// Decode elements
 	cborDec := decMode.NewByteStreamDecoder(data)
-	elements, err := newElementsFromData(cborDec, decodeStorable)
+	elements, err := newElementsFromData(cborDec, decodeStorable, id, inlinedExtraData)
 	if err != nil {
 		// Don't need to wrap error as external error because err is already categorized by newElementsFromData().
 		return nil, err
@@ -2264,28 +2371,285 @@ func newMapDataSlabFromDataV1(
 	}, nil
 }
 
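+// A minimal sketch (not part of this change) of how an application-defined
+// StorableDecoder callback can dispatch to the inlined decoders below after
+// reading a tag number; everything outside this diff is an assumption:
+//
+//	func decodeStorable(dec *cbor.StreamDecoder, id SlabID, inlined []ExtraData) (Storable, error) {
+//		tagNum, err := dec.DecodeTagNumber()
+//		if err != nil {
+//			return nil, err
+//		}
+//		switch tagNum {
+//		case CBORTagInlinedComposite:
+//			return DecodeInlinedCompositeStorable(dec, decodeStorable, id, inlined)
+//		case CBORTagInlinedMap:
+//			return DecodeInlinedMapStorable(dec, decodeStorable, id, inlined)
+//		default:
+//			return nil, fmt.Errorf("unexpected tag number %d", tagNum)
+//		}
+//	}
+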
+// DecodeInlinedCompositeStorable decodes inlined composite data. Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedComposite, and tag content
+// as 3-element array:
+//
+// - index of inlined extra data
+// - value ID index
+// - CBOR array of elements
+//
+// NOTE: This function doesn't decode tag number because tag number is decoded
+// in the caller and decoder only contains tag content.
+func DecodeInlinedCompositeStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedMapDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedMapDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite, expect array of %d elements, got %d elements",
+				inlinedMapDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+
+	extraData, ok := inlinedExtraData[extraDataIndex].(*compositeExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite: expect *compositeExtraData, got %T",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined composite: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index [8]byte
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, SlabIndex(index))
+
+	// element 2: composite field values
+	elemCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if elemCount != uint64(len(extraData.keys)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode composite values: got %d, expect %d",
+				elemCount,
+				len(extraData.keys)))
+	}
+
+	hkeys := make([]Digest, len(extraData.hkeys))
+	copy(hkeys, extraData.hkeys)
+
+	// Decode values
+	size := uint32(hkeyElementsPrefixSize)
+	elems := make([]element, elemCount)
+	for i := 0; i < int(elemCount); i++ {
+		value, err := decodeStorable(dec, parentSlabID, inlinedExtraData)
+		if err != nil {
+			return nil, err
+		}
+
+		elemSize := singleElementPrefixSize + extraData.keys[i].ByteSize() + value.ByteSize()
+		// TODO: does key need to be copied?
+		elem := &singleElement{extraData.keys[i], value, elemSize}
+
+		elems[i] = elem
+		size += digestSize + elem.Size()
+	}
+
+	// Create hkeyElements
+	elements := &hkeyElements{
+		hkeys: hkeys,
+		elems: elems,
+		level: 0,
+		size:  size,
+	}
+
+	header := MapSlabHeader{
+		slabID:   slabID,
+		size:     inlinedMapDataSlabPrefixSize + elements.Size(),
+		firstKey: elements.firstKey(),
+	}
+
+	// TODO: does extra data need to be copied?
+	copiedExtraData := &MapExtraData{
+		TypeInfo: extraData.mapExtraData.TypeInfo,
+		Count:    extraData.mapExtraData.Count,
+		Seed:     extraData.mapExtraData.Seed,
+	}
+
+	return &MapDataSlab{
+		header:         header,
+		elements:       elements,
+		extraData:      copiedExtraData,
+		anySize:        false,
+		collisionGroup: false,
+		inlined:        true,
+	}, nil
+}
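+
+// NOTE: inlined composites of the same composite type share one composite
+// extra data entry (type info, seed, digests, and field names), so element 2
+// above carries only the field values. This is what makes the composite
+// encoding smaller than the generic inlined map encoding.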
+
+// DecodeInlinedMapStorable decodes inlined map data slab. Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedMap, and tag content
+// as 3-element array:
+//
+// - index of inlined extra data
+// - value ID index
+// - CBOR array of elements
+//
+// NOTE: This function doesn't decode tag number because tag number is decoded
+// in the caller and decoder only contains tag content.
+func DecodeInlinedMapStorable(
+	dec *cbor.StreamDecoder,
+	decodeStorable StorableDecoder,
+	parentSlabID SlabID,
+	inlinedExtraData []ExtraData,
+) (
+	Storable,
+	error,
+) {
+	const inlinedMapDataSlabArrayCount = 3
+
+	arrayCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if arrayCount != inlinedMapDataSlabArrayCount {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map data slab, expect array of %d elements, got %d elements",
+				inlinedMapDataSlabArrayCount,
+				arrayCount))
+	}
+
+	// element 0: extra data index
+	extraDataIndex, err := dec.DecodeUint64()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if extraDataIndex >= uint64(len(inlinedExtraData)) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map: inlined extra data index %d exceeds number of inlined extra data %d",
+				extraDataIndex,
+				len(inlinedExtraData)))
+	}
+	extraData, ok := inlinedExtraData[extraDataIndex].(*MapExtraData)
+	if !ok {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"extra data (%T) is wrong type, expect *MapExtraData",
+				inlinedExtraData[extraDataIndex]))
+	}
+
+	// element 1: slab index
+	b, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+	if len(b) != slabIndexSize {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"failed to decode inlined map: expect %d bytes for slab index, got %d bytes",
+				slabIndexSize,
+				len(b)))
+	}
+
+	var index [8]byte
+	copy(index[:], b)
+
+	slabID := NewSlabID(parentSlabID.address, SlabIndex(index))
+
+	// Decode elements
+	elements, err := newElementsFromData(dec, decodeStorable, parentSlabID, inlinedExtraData)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by newElementsFromData().
+		return nil, err
+	}
+
+	header := MapSlabHeader{
+		slabID:   slabID,
+		size:     inlinedMapDataSlabPrefixSize + elements.Size(),
+		firstKey: elements.firstKey(),
+	}
+
+	// NOTE: extra data doesn't need to be copied because every inlined map has its own inlined extra data.
+
+	return &MapDataSlab{
+		header:         header,
+		elements:       elements,
+		extraData:      extraData,
+		anySize:        false,
+		collisionGroup: false,
+		inlined:        true,
+	}, nil
+}
+
 // Encode encodes this map data slab to the given encoder.
// // Root DataSlab Header: // -// +-------------------------------+------------+ -// | slab version + flag (2 bytes) | extra data | -// +-------------------------------+------------+ +// +-------------------------------+------------+---------------------------------+ +// | slab version + flag (2 bytes) | extra data | inlined extra data (if present) | +// +-------------------------------+------------+---------------------------------+ // -// Non-root DataSlab Header (18 bytes): +// Non-root DataSlab Header: // -// +-------------------------------+-------------------------+ -// | slab version + flag (2 bytes) | next slab ID (16 bytes) | -// +-------------------------------+-------------------------+ +// +-------------------------------+---------------------------------+-----------------------------+ +// | slab version + flag (2 bytes) | inlined extra data (if present) | next slab ID (if non-empty) | +// +-------------------------------+---------------------------------+-----------------------------+ // // Content: // // CBOR encoded elements // // See MapExtraData.Encode() for extra data section format. +// See InlinedExtraData.Encode() for inlined extra data section format. // See hkeyElements.Encode() and singleElements.Encode() for elements section format. func (m *MapDataSlab) Encode(enc *Encoder) error { + if m.inlined { + return NewEncodingError( + fmt.Errorf("failed to encode inlined map data slab as standalone slab")) + } + + // Encoding is done in two steps: + // + // 1. Encode map elements using a new buffer while collecting inlined extra data from inlined elements. + // 2. Encode slab with deduplicated inlined extra data and copy encoded elements from previous buffer. + + inlinedTypes := newInlinedExtraData() + + // TODO: maybe use a buffer pool + var buf bytes.Buffer + elemEnc := NewEncoder(&buf, enc.encMode) + + err := m.encodeElements(elemEnc, inlinedTypes) + if err != nil { + return err + } + const version = 1 slabType := slabMapData @@ -2314,7 +2678,11 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { h.setRoot() } - // Write head (version + flag) + if !inlinedTypes.empty() { + h.setHasInlinedSlabs() + } + + // Encode head _, err = enc.Write(h[:]) if err != nil { return NewEncodingError(err) @@ -2329,7 +2697,15 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } } - // Encode next slab ID + // Encode inlined types + if !inlinedTypes.empty() { + err = inlinedTypes.Encode(enc) + if err != nil { + return NewEncodingError(err) + } + } + + // Encode next slab ID for non-root slab if m.next != SlabIDUndefined { n, err := m.next.ToRawBytes(enc.Scratch[:]) if err != nil { @@ -2345,7 +2721,21 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { } // Encode elements - err = m.elements.Encode(enc) + err = enc.CBOR.EncodeRawBytes(buf.Bytes()) + if err != nil { + return NewEncodingError(err) + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func (m *MapDataSlab) encodeElements(enc *Encoder, inlinedTypes *inlinedExtraData) error { + err := m.elements.Encode(enc, inlinedTypes) if err != nil { // Don't need to wrap error as external error because err is already categorized by elements.Encode(). return err @@ -2359,6 +2749,196 @@ func (m *MapDataSlab) Encode(enc *Encoder) error { return nil } +// encodeAsInlined encodes inlined map data slab. 
Encoding is
+// version 1 with CBOR tag having tag number CBORTagInlinedMap,
+// and tag content as 3-element array:
+//
+// - index of inlined extra data
+// - value ID index
+// - CBOR array of elements
+func (m *MapDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+	if m.extraData == nil {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode non-root map data slab as inlined"))
+	}
+
+	if !m.inlined {
+		return NewEncodingError(
+			fmt.Errorf("failed to encode standalone map data slab as inlined"))
+	}
+
+	if m.canBeEncodedAsComposite() {
+		return m.encodeAsInlinedComposite(enc, inlinedTypeInfo)
+	}
+
+	return m.encodeAsInlinedMap(enc, inlinedTypeInfo)
+}
+
+func (m *MapDataSlab) encodeAsInlinedMap(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+
+	extraDataIndex := inlinedTypeInfo.addMapExtraData(m.extraData)
+
+	if extraDataIndex > 255 {
+		return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit 255", extraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedMap,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed sized CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(m.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: map elements
+	err = m.elements.Encode(enc, inlinedTypeInfo)
+	if err != nil {
+		// Don't need to wrap error as external error because err is already categorized by elements.Encode().
+		return err
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
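+
+// For example (illustrative bytes; they match the expected encodings in
+// map_test.go), an inlined map that uses extra data index 0 and slab index 2
+// begins with:
+//
+//	0xd8, 0xfb   tag number CBORTagInlinedMap
+//	0x83         array head of 3 elements
+//	0x18, 0x00   element 0: extra data index 0
+//	0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02
+//	             element 1: 8-byte slab index
+//	...          element 2: CBOR-encoded map elements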
+
+func (m *MapDataSlab) encodeAsInlinedComposite(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
+
+	// Composite extra data is deduplicated by TypeInfo.ID() and number of fields.
+	// Composite fields can be removed, but new fields can't be added and existing
+	// field types can't be modified. Given this, composites with the same type ID
+	// and the same number of fields have the same fields.
+	// See https://developers.flow.com/cadence/language/contract-updatability#fields
+
+	extraDataIndex, orderedKeys, exist := inlinedTypeInfo.getCompositeTypeInfo(m.extraData.TypeInfo, int(m.extraData.Count))
+
+	if !exist {
+		elements, ok := m.elements.(*hkeyElements)
+		if !ok {
+			// This should never happen because canBeEncodedAsComposite()
+			// returns false for map containing any collision elements.
+			return NewEncodingError(fmt.Errorf("singleElements can't be encoded as composite elements"))
+		}
+
+		orderedKeys = make([]MapKey, len(elements.elems))
+		for i, e := range elements.elems {
+			e, ok := e.(*singleElement)
+			if !ok {
+				// This should never happen because canBeEncodedAsComposite()
+				// returns false for map containing any collision elements.
+				return NewEncodingError(fmt.Errorf("non-singleElement can't be encoded as composite elements"))
+			}
+			orderedKeys[i] = e.key
+		}
+
+		extraDataIndex = inlinedTypeInfo.addCompositeExtraData(m.extraData, elements.hkeys, orderedKeys)
+	}
+
+	if extraDataIndex > 255 {
+		// This should never happen because of slab size.
+		return NewEncodingError(fmt.Errorf("extra data index %d exceeds limit 255", extraDataIndex))
+	}
+
+	var err error
+
+	// Encode tag number and array head of 3 elements
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		// tag number
+		0xd8, CBORTagInlinedComposite,
+		// array head of 3 elements
+		0x83,
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: extra data index
+	// NOTE: encoded extra data index is fixed sized CBOR uint
+	err = enc.CBOR.EncodeRawBytes([]byte{
+		0x18,
+		byte(extraDataIndex),
+	})
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 1: slab index
+	err = enc.CBOR.EncodeBytes(m.header.slabID.index[:])
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: map elements
+	err = m.elements.EncodeCompositeValues(enc, orderedKeys, inlinedTypeInfo)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+// canBeEncodedAsComposite returns true if:
+// - map data slab is inlined
+// - map is composite type
+// - no collision elements
+// - keys are stored inline (not in a separate slab)
+func (m *MapDataSlab) canBeEncodedAsComposite() bool {
+	if !m.inlined {
+		return false
+	}
+
+	if !m.extraData.TypeInfo.IsComposite() {
+		return false
+	}
+
+	elements, ok := m.elements.(*hkeyElements)
+	if !ok {
+		return false
+	}
+
+	for _, e := range elements.elems {
+		se, ok := e.(*singleElement)
+		if !ok {
+			// Has collision element
+			return false
+		}
+		if _, ok = se.key.(SlabIDStorable); ok {
+			// Key is stored in a separate slab
+			return false
+		}
+	}
+
+	return true
+}
+
 func (m *MapDataSlab) hasPointer() bool {
 	return m.elements.hasPointer()
 }
@@ -2368,12 +2948,34 @@
 }
 
 func (m *MapDataSlab) ChildStorables() []Storable {
 	return elementsStorables(m.elements, nil)
 }
 
 func (m *MapDataSlab) getPrefixSize() uint32 {
+	if m.inlined {
+		return inlinedMapDataSlabPrefixSize
+	}
 	if m.extraData != nil {
 		return mapRootDataSlabPrefixSize
 	}
 	return mapDataSlabPrefixSize
 }
 
+func (m *MapDataSlab) Inlined() bool {
+	return m.inlined
+}
+
+// Inlinable returns true if:
+// - map data slab is root slab
+// - size of inlined map data slab <= maxInlineSize
+func (m *MapDataSlab) Inlinable(maxInlineSize uint64) bool {
+	if m.extraData == nil {
+		// Non-root data slab is not inlinable.
+		return false
+	}
+
+	inlinedSize := inlinedMapDataSlabPrefixSize + m.elements.Size()
+
+	// Inlined byte size must be less than or equal to max inline size.
+	return uint64(inlinedSize) <= maxInlineSize
+}
+
 func elementsStorables(elems elements, childStorables []Storable) []Storable {
 
 	switch v := elems.(type) {
@@ -2441,10 +3043,12 @@
 	m.header.size = m.getPrefixSize() + m.elements.Size()
 
 	// Store modified slab
-	err = storage.Store(m.header.slabID, m)
-	if err != nil {
-		// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
-		return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID))
+	if !m.inlined {
+		err := storage.Store(m.header.slabID, m)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by SlabStorage interface.
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + } } return existingValue, nil @@ -2465,10 +3069,12 @@ func (m *MapDataSlab) Remove(storage SlabStorage, digester Digester, level uint, m.header.size = m.getPrefixSize() + m.elements.Size() // Store modified slab - err = storage.Store(m.header.slabID, m) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + if !m.inlined { + err := storage.Store(m.header.slabID, m) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.header.slabID)) + } } return k, v, nil @@ -3027,6 +3633,14 @@ func (m *MapMetaDataSlab) Encode(enc *Encoder) error { return nil } +func (m *MapMetaDataSlab) Inlined() bool { + return false +} + +func (m *MapMetaDataSlab) Inlinable(_ uint64) bool { + return false +} + func (m *MapMetaDataSlab) StoredValue(storage SlabStorage) (Value, error) { if m.extraData == nil { return nil, NewNotValueError(m.SlabID()) @@ -3888,6 +4502,51 @@ func NewMapWithRootID(storage SlabStorage, rootID SlabID, digestBuilder Digester }, nil } +func (m *OrderedMap) Inlined() bool { + return m.root.Inlined() +} + +func (m *OrderedMap) setParentUpdater(f parentUpdater) { + m.parentUpdater = f +} + +// setCallbackWithChild sets up callback function with child value so +// parent map m can be notified when child value is modified. +func (m *OrderedMap) setCallbackWithChild( + comparator ValueComparator, + hip HashInputProvider, + key Value, + child Value, +) { + c, ok := child.(valueNotifier) + if !ok { + return + } + + c.setParentUpdater(func() error { + // Set child value with parent map using same key. + // Set() calls c.Storable() which returns inlined or not-inlined child storable. + existingValueStorable, err := m.Set(comparator, hip, key, c) + if err != nil { + return err + } + + if existingValueStorable == nil { + return NewFatalError(fmt.Errorf("failed to reset child value in parent updater callback because previous value is nil")) + } + + return nil + }) +} + +// notifyParentIfNeeded calls parent updater if this map is a child value. +func (m *OrderedMap) notifyParentIfNeeded() error { + if m.parentUpdater == nil { + return nil + } + return m.parentUpdater() +} + func (m *OrderedMap) Has(comparator ValueComparator, hip HashInputProvider, key Value) (bool, error) { _, err := m.get(comparator, hip, key) if err != nil { @@ -3914,6 +4573,9 @@ func (m *OrderedMap) Get(comparator ValueComparator, hip HashInputProvider, key // Wrap err as external error (if needed) because err is returned by Storable interface. 
return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to get storable's stored value") } + + m.setCallbackWithChild(comparator, hip, key, v) + return v, nil } @@ -3986,6 +4648,11 @@ func (m *OrderedMap) Set(comparator ValueComparator, hip HashInputProvider, key } } + err = m.notifyParentIfNeeded() + if err != nil { + return nil, err + } + return existingValue, nil } @@ -4035,6 +4702,11 @@ func (m *OrderedMap) Remove(comparator ValueComparator, hip HashInputProvider, k } } + err = m.notifyParentIfNeeded() + if err != nil { + return nil, nil, err + } + return k, v, nil } @@ -4142,24 +4814,88 @@ func (m *OrderedMap) promoteChildAsNewRoot(childID SlabID) error { } func (m *OrderedMap) SlabID() SlabID { + if m.root.Inlined() { + return SlabIDUndefined + } return m.root.SlabID() } func (m *OrderedMap) ValueID() ValueID { - sid := m.SlabID() + return slabIDToValueID(m.root.SlabID()) +} - var id ValueID - copy(id[:], sid.address[:]) - copy(id[8:], sid.index[:]) +// Storable returns OrderedMap m as either: +// - SlabIDStorable, or +// - inlined data slab storable +func (m *OrderedMap) Storable(_ SlabStorage, _ Address, maxInlineSize uint64) (Storable, error) { - return id -} + inlined := m.root.Inlined() + inlinable := m.root.Inlinable(maxInlineSize) -func (m *OrderedMap) StoredValue(_ SlabStorage) (Value, error) { - return m, nil -} + if inlinable && inlined { + // Root slab is inlinable and was inlined. + // Return root slab as storable, no size adjustment and change to storage. + return m.root, nil + } + + if !inlinable && !inlined { + // Root slab is not inlinable and was not inlined. + // Return root slab as storable, no size adjustment and change to storage. + return SlabIDStorable(m.SlabID()), nil + } + + if inlinable && !inlined { + // Root slab is inlinable and was NOT inlined. + + // Inline root data slab. + + // Inlineable root slab must be data slab. + rootDataSlab, ok := m.root.(*MapDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlinable map slab type %T", m.root)) + } + + rootID := rootDataSlab.header.slabID + + // Remove root slab from storage because it is going to be inlined. + err := m.Storage.Remove(rootID) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to remove slab %s", rootID)) + } + + // Update root data slab size from not inlined to inlined + rootDataSlab.header.size = inlinedMapDataSlabPrefixSize + rootDataSlab.elements.Size() + + // Update root data slab inlined status. + rootDataSlab.inlined = true + + return rootDataSlab, nil + } + + // here, root slab is NOT inlinable and was inlined. + + // Un-inline root slab. + + // Inlined root slab must be data slab. + rootDataSlab, ok := m.root.(*MapDataSlab) + if !ok { + return nil, NewFatalError(fmt.Errorf("unexpected inlined map slab type %T", m.root)) + } + + // Update root data slab size from inlined to not inlined. + rootDataSlab.header.size = mapRootDataSlabPrefixSize + rootDataSlab.elements.Size() + + // Update root data slab inlined status. + rootDataSlab.inlined = false + + // Store root slab in storage + err := m.Storage.Store(m.SlabID(), m.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. 
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.SlabID())) + } -func (m *OrderedMap) Storable(_ SlabStorage, _ Address, _ uint64) (Storable, error) { return SlabIDStorable(m.SlabID()), nil } @@ -4564,22 +5300,33 @@ func (m *OrderedMap) PopIterate(fn MapPopIterationFunc) error { extraData := m.root.ExtraData() extraData.Count = 0 + inlined := m.root.Inlined() + + prefixSize := uint32(mapRootDataSlabPrefixSize) + if inlined { + prefixSize = uint32(inlinedMapDataSlabPrefixSize) + } + // Set root to empty data slab m.root = &MapDataSlab{ header: MapSlabHeader{ slabID: rootID, - size: mapRootDataSlabPrefixSize + hkeyElementsPrefixSize, + size: prefixSize + hkeyElementsPrefixSize, }, elements: newHkeyElements(0), extraData: extraData, + inlined: inlined, } - // Save root slab - err = m.Storage.Store(m.root.SlabID(), m.root) - if err != nil { - // Wrap err as external error (if needed) because err is returned by SlabStorage interface. - return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + if !m.Inlined() { + // Save root slab + err = m.Storage.Store(m.root.SlabID(), m.root) + if err != nil { + // Wrap err as external error (if needed) because err is returned by SlabStorage interface. + return wrapErrorfAsExternalErrorIfNeeded(err, fmt.Sprintf("failed to store slab %s", m.root.SlabID())) + } } + return nil } diff --git a/map_debug.go b/map_debug.go index 051b7acb..9e752325 100644 --- a/map_debug.go +++ b/map_debug.go @@ -107,6 +107,9 @@ func GetMapStats(m *OrderedMap) (MapStats, error) { if _, ok := e.value.(SlabIDStorable); ok { storableDataSlabCount++ } + // This handles use case of inlined array or map value containing SlabID + ids := getSlabIDFromStorable(e.value, nil) + storableDataSlabCount += uint64(len(ids)) } } } @@ -188,12 +191,7 @@ func DumpMapSlabs(m *OrderedMap) ([]string, error) { } } - childStorables := dataSlab.ChildStorables() - for _, e := range childStorables { - if id, ok := e.(SlabIDStorable); ok { - overflowIDs = append(overflowIDs, SlabID(id)) - } - } + overflowIDs = getSlabIDFromStorable(dataSlab, overflowIDs) } else { meta := slab.(*MapMetaDataSlab) @@ -271,7 +269,7 @@ func ValidMap(m *OrderedMap, typeInfo TypeInfo, tic TypeInfoComparator, hip Hash } computedCount, dataSlabIDs, nextDataSlabIDs, firstKeys, err := validMapSlab( - m.Storage, m.digesterBuilder, tic, hip, m.root.SlabID(), 0, nil, []SlabID{}, []SlabID{}, []Digest{}) + m.Storage, m.digesterBuilder, tic, hip, m.root, 0, nil, []SlabID{}, []SlabID{}, []Digest{}) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). return err @@ -320,7 +318,7 @@ func validMapSlab( digesterBuilder DigesterBuilder, tic TypeInfoComparator, hip HashInputProvider, - id SlabID, + slab MapSlab, level int, headerFromParentSlab *MapSlabHeader, dataSlabIDs []SlabID, @@ -334,11 +332,7 @@ func validMapSlab( err error, ) { - slab, err := getMapSlab(storage, id) - if err != nil { - // Don't need to wrap error as external error because err is already categorized by getMapSlab(). - return 0, nil, nil, nil, err - } + id := slab.Header().slabID if level > 0 { // Verify that non-root slab doesn't have extra data. 
@@ -388,10 +382,18 @@ func validMapSlab( id, dataSlab.header.firstKey, dataSlab.elements.firstKey())) } + // Verify that only root slab can be inlined + if level > 0 && slab.Inlined() { + return 0, nil, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id)) + } + // Verify that aggregated element size + slab prefix is the same as header.size computedSize := uint32(mapDataSlabPrefixSize) if level == 0 { computedSize = uint32(mapRootDataSlabPrefixSize) + if dataSlab.Inlined() { + computedSize = uint32(inlinedMapDataSlabPrefixSize) + } } computedSize += elementSize @@ -444,10 +446,16 @@ func validMapSlab( for i := 0; i < len(meta.childrenHeaders); i++ { h := meta.childrenHeaders[i] + childSlab, err := getMapSlab(storage, h.slabID) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by getMapSlab(). + return 0, nil, nil, nil, err + } + // Verify child slabs count := uint64(0) count, dataSlabIDs, nextDataSlabIDs, firstKeys, err = - validMapSlab(storage, digesterBuilder, tic, hip, h.slabID, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) + validMapSlab(storage, digesterBuilder, tic, hip, childSlab, level+1, &h, dataSlabIDs, nextDataSlabIDs, firstKeys) if err != nil { // Don't need to wrap error as external error because err is already categorized by validMapSlab(). return 0, nil, nil, nil, err @@ -849,16 +857,31 @@ func validMapSlabSerialization( } // Extra check: encoded data size == header.size - encodedSlabSize, err := computeSlabSize(data) + // This check is skipped for slabs with inlined composite because + // encoded size and slab size differ for inlined composites. + // For inlined composites, digests and field keys are encoded in + // composite extra data section for reuse, and only composite field + // values are encoded in non-extra data section. + // This reduces encoding size because composite values of the same + // composite type can reuse encoded type info, seed, digests, and field names. + // TODO: maybe add size check for slabs with inlined composite by decoding entire slab. + inlinedComposite, err := hasInlinedComposite(data) if err != nil { - // Don't need to wrap error as external error because err is already categorized by computeSlabSize(). + // Don't need to wrap error as external error because err is already categorized by hasInlinedComposite(). return err } + if !inlinedComposite { + encodedSlabSize, err := computeSize(data) + if err != nil { + // Don't need to wrap error as external error because err is already categorized by computeSize(). 
+ return err + } - if slab.Header().size != uint32(encodedSlabSize) { - return NewFatalError( - fmt.Errorf("slab %d encoded size %d != header.size %d", - id, encodedSlabSize, slab.Header().size)) + if slab.Header().size != uint32(encodedSlabSize) { + return NewFatalError( + fmt.Errorf("slab %d encoded size %d != header.size %d", + id, encodedSlabSize, slab.Header().size)) + } } // Compare encoded data of original slab with encoded data of decoded slab @@ -953,6 +976,11 @@ func mapDataSlabEqual( return err } + // Compare inlined + if expected.inlined != actual.inlined { + return NewFatalError(fmt.Errorf("inlined %t is wrong, want %t", actual.inlined, expected.inlined)) + } + // Compare next if expected.next != actual.next { return NewFatalError(fmt.Errorf("next %d is wrong, want %d", actual.next, expected.next)) @@ -1287,14 +1315,14 @@ func mapSingleElementEqual( } } - if !compare(expected.value, actual.value) { - return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) - } - - // Compare value stored in a separate slab - if idStorable, ok := expected.value.(SlabIDStorable); ok { + // Compare nested element + switch ee := expected.value.(type) { + case SlabIDStorable: + if !compare(expected.value, actual.value) { + return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) + } - v, err := idStorable.StoredValue(storage) + v, err := ee.StoredValue(storage) if err != nil { // Don't need to wrap error as external error because err is already categorized by SlabIDStorable.StoredValue(). return err @@ -1312,6 +1340,27 @@ func mapSingleElementEqual( // Don't need to wrap error as external error because err is already categorized by ValidValueSerialization(). return err } + + case *ArrayDataSlab: + ae, ok := actual.value.(*ArrayDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *ArrayDataSlab, actual %T", ae)) + } + + return arrayDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + + case *MapDataSlab: + ae, ok := actual.value.(*MapDataSlab) + if !ok { + return NewFatalError(fmt.Errorf("expect element as *MapDataSlab, actual %T", ae)) + } + + return mapDataSlabEqual(ee, ae, storage, cborDecMode, cborEncMode, decodeStorable, decodeTypeInfo, compare) + + default: + if !compare(expected.value, actual.value) { + return NewFatalError(fmt.Errorf("singleElement value %v is wrong, want %v", actual.value, expected.value)) + } } return nil diff --git a/map_test.go b/map_test.go index 6ccf380b..6d4cc71f 100644 --- a/map_test.go +++ b/map_test.go @@ -2639,7 +2639,7 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("has pointer no collision", func(t *testing.T) { + t.Run("has inlined array", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) @@ -2697,7 +2697,6 @@ func TestMapEncodeDecode(t *testing.T) { id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} - id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ @@ -2731,7 +2730,7 @@ func TestMapEncodeDecode(t *testing.T) { // child header 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - 0x00, 
0xf2, + 0x00, 0xf3, }, // data slab @@ -2786,9 +2785,16 @@ func TestMapEncodeDecode(t *testing.T) { // data slab id3: { // version - 0x10, - // flag: has pointer + map data - 0x48, + 0x11, + // flag: has inlined slab + map data + 0x08, + + // inlined slab extra data + 0x81, + // inlined array extra data + 0xd8, 0xf7, + 0x81, + 0x18, 0x2b, // the following encoded data is valid CBOR @@ -2827,23 +2833,7 @@ func TestMapEncodeDecode(t *testing.T) { // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] 0x82, 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, - 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - }, - // array data slab - id4: { - // version - 0x10, - // flag: root + array data - 0x80, - // extra data (CBOR encoded array of 1 elements) - 0x81, - // type info - 0x18, 0x2b, - - // CBOR encoded array head (fixed size 3 byte) - 0x99, 0x00, 0x01, - // CBOR encoded array elements - 0xd8, 0xa4, 0x00, + 0xd8, 0xfa, 0x83, 0x18, 0x00, 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x99, 0x00, 0x01, 0xd8, 0xa4, 0x0, }, } @@ -2855,15 +2845,15 @@ func TestMapEncodeDecode(t *testing.T) { require.Equal(t, expected[id1], stored[id1]) require.Equal(t, expected[id2], stored[id2]) require.Equal(t, expected[id3], stored[id3]) - require.Equal(t, expected[id4], stored[id4]) // Verify slab size in header is correct. meta, ok := m.root.(*MapMetaDataSlab) require.True(t, ok) require.Equal(t, 2, len(meta.childrenHeaders)) require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) - // Need to add slabIDSize to encoded data slab here because empty slab ID is omitted during encoding. 
- require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + + const inlinedExtraDataSize = 6 + require.Equal(t, uint32(len(stored[id3])-inlinedExtraDataSize+slabIDSize), meta.childrenHeaders[1].size) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, stored) @@ -2875,180 +2865,175 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("inline collision 1 level", func(t *testing.T) { - + t.Run("root data slab, inlined child map of same type", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) + childMapTypeInfo := testTypeInfo{43} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - const mapSize = 8 + const mapSize = 2 keyValues := make(map[Value]Value, mapSize) + r := 'a' for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + var k, v Value - digests := []Digest{Digest(i % 4), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = v + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap } - require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, uint64(mapSize), parentMap.Count()) id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ - - // map metadata slab id1: { - // version - 0x10, + // version, has inlined slab + 0x11, // flag: root + map data 0x88, - // extra data (CBOR encoded array of 3 elements) + + // slab extra data + // CBOR encoded array of 3 elements 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 8 - 0x08, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // the following encoded data is valid CBOR - - // elements (array of 3 elements) + // 2 inlined slab extra data + 0x82, + // element 0 + // inlined map extra data + 0xd8, 0xf8, 0x83, - - // level: 0 - 0x00, - - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - - // elements (array of 2 elements) - 0x99, 0x00, 0x04, - - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 
0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, 0x83, - - // level: 1 + // type info + 0x18, 0x2b, + // count: 1 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + // the following encoded data is valid CBOR - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // elements (array of 3 elements) 0x83, - // level: 1 - 0x01, + // level: 0 + 0x00, // hkeys (byte string of length 8 * 2) 0x59, 0x00, 0x10, // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, // elements (array of 2 elements) // each element is encoded as CBOR array of 2 elements (key, value) 0x99, 0x00, 0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - - // inline collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 2) - 0x59, 0x00, 0x10, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x01, + // inlined map 
slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: 2
+ 0xd8, 0xa4, 0x02,
},
}

+ // Verify encoded data
stored, err := storage.Encode()
require.NoError(t, err)
+ require.Equal(t, len(expected), len(stored))
require.Equal(t, expected[id1], stored[id1])

@@ -3062,58 +3047,105 @@ func TestMapEncodeDecode(t *testing.T) {
verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
})

- t.Run("inline collision 2 levels", func(t *testing.T) {
-
+ t.Run("root data slab, inlined child map of different type", func(t *testing.T) {
SetThreshold(256)
defer SetThreshold(1024)

+ childMapTypeInfo1 := testTypeInfo{43}
+ childMapTypeInfo2 := testTypeInfo{44}
+
// Create and populate map in memory
storage := newTestBasicStorage(t)

digesterBuilder := &mockDigesterBuilder{}

// Create map
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
require.NoError(t, err)

- const mapSize = 8
- keyValues := make(map[Value]Value)
+ const mapSize = 2
+ keyValues := make(map[Value]Value, mapSize)
+ r := 'a'
for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i * 2)
+ var k, v Value

- digests := []Digest{Digest(i % 4), Digest(i % 2)}
- digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+ var ti TypeInfo
+ if i%2 == 0 {
+ ti = childMapTypeInfo2
+ } else {
+ ti = childMapTypeInfo1
+ }

- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ // Create child map
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti)
+ require.NoError(t, err)
+
+ k = Uint64Value(i)
+ v = Uint64Value(i * 2)
+
+ // Insert element to child map
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
require.NoError(t, err)
require.Nil(t, existingStorable)

- keyValues[k] = v
+ k = NewStringValue(string(r))
+ r++
+
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+ // Insert child map to parent map
+ existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = childMap
}

- require.Equal(t, uint64(mapSize), m.Count())
+ require.Equal(t, uint64(mapSize), parentMap.Count())

id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}

// Expected serialized slab data with slab id
expected := map[SlabID][]byte{
-
- // map data slab
id1: {
- // version
- 0x10,
+ // version, has inlined slab
+ 0x11,
// flag: root + map data
0x88,
- // extra data (CBOR encoded array of 3 elements)
+
+ // slab extra data
+ // CBOR encoded array of 3 elements
0x83,
- // type info: "map"
+ // type info
0x18, 0x2a,
- // count: 8
- 0x08,
+ // count: 2
+ 0x02,
// seed
0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,

+ // 2 inlined slab extra data
+ 0x82,
+ // element 0
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2c,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+ // element 1
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+
// the following encoded data is valid CBOR
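+ // Each inlined child map value is CBORTagInlinedMap over a 3-element array:
+ // [inlined extra data index, slab index, elements]. The index refers to the
+ // inlined slab extra data section above; the 8-byte slab index preserves the
+ // child's value ID (the address half is the parent container's address).
// elements (array of 3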
elements) @@ -3122,170 +3154,76 @@ func TestMapEncodeDecode(t *testing.T) { // level: 0 0x00, - // hkeys (byte string of length 8 * 4) - 0x59, 0x00, 0x20, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - - // elements (array of 4 elements) - 0x99, 0x00, 0x04, - - // inline collision group corresponding to hkey 0 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level 1 - 0x01, - - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - - // elements (array of 1 elements) - 0x99, 0x00, 0x01, - - // inline collision group corresponding to hkey [0, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - - // elements (array of 2 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x02, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, - - // inline collision group corresponding to hkey 1 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 1 + // hkey: 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // elements (array of 1 elements) - 0x99, 0x00, 0x01, - - // inline collision group corresponding to hkey [1, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - // elements (array of 2 elements) // each element is encoded as CBOR array of 2 elements (key, value) 0x99, 0x00, 0x02, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - - // inline collision group corresponding to hkey 2 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 1) + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - - // elements (array of 1 element) + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, - // inline collision group corresponding to hkey [2, 0] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - - // elements 
(array of 2 element) - 0x99, 0x00, 0x02, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - - // inline collision group corresponding to hkey 3 - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) + // extra data index 0 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined map elements (array of 3 elements) 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 1) + // level 0 + 0x00, + // hkey bytes 0x59, 0x00, 0x08, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - - // elements (array of 1 element) + 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9, + // 1 element 0x99, 0x00, 0x01, - - // inline collision group corresponding to hkey [3, 1] - // (tag number CBORTagInlineCollisionGroup) - 0xd8, 0xfd, - // (tag content: array of 3 elements) - 0x83, - - // level: 2 - 0x02, - - // hkeys (empty byte string) - 0x40, - - // elements (array of 2 element) - 0x99, 0x00, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: 1 + 0xd8, 0xa4, 0x02, }, } + // Verify encoded data stored, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) require.Equal(t, expected[id1], stored[id1]) @@ -3299,60 +3237,128 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("external collision", func(t *testing.T) { - + t.Run("root data slab, multiple levels of inlined child map of same type", func(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) + childMapTypeInfo := testTypeInfo{43} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - const mapSize = 20 - keyValues := make(map[Value]Value) + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i * 2) + var k, v Value - digests := []Digest{Digest(i % 2), Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + // Create grand child map + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to grand child map + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - keyValues[k] = v + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = Uint64Value(i) + + // Insert grand child map to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = 
parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap } - require.Equal(t, uint64(mapSize), m.Count()) + require.Equal(t, uint64(mapSize), parentMap.Count()) id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} - id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} // Expected serialized slab data with slab id expected := map[SlabID][]byte{ - - // map data slab id1: { - // version - 0x10, - // flag: root + has pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x14, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, // seed 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + // element 3 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // the following encoded data is valid CBOR // elements (array of 3 elements) @@ -3363,161 +3369,118 @@ func TestMapEncodeDecode(t *testing.T) { // hkeys (byte string of length 8 * 2) 0x59, 0x00, 0x10, - // hkey: 0 + // hkey: 1 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) 0x99, 0x00, 0x02, - // external collision group corresponding to hkey 0 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - - // external collision group corresponding to hkey 1 - // (tag number CBORTagExternalCollisionGroup) - 0xd8, 0xfe, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - }, - - // external collision group - id2: { - // version - 0x10, - // flag: any size + collision group - 0x2b, - - // the following encoded data is valid CBOR + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // elements (array of 3 elements) + // inlined map elements (array of 3 elements) 0x83, - - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 10) - 0x59, 0x00, 0x50, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // hkey: 2 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - // hkey: 4 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x04, - // hkey: 6 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - // hkey: 8 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - // hkey: 10 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, - // hkey: 12 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, - // hkey: 14 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, - // hkey: 16 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, - // hkey: 18 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, - - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x0a, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - // element: [uint64(2), uint64(4)] - 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, - // element: [uint64(4), uint64(8)] - 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, - // element: [uint64(6), uint64(12)] - 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, - // element: [uint64(8), uint64(16)] - 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, - // element: [uint64(10), uint64(20)] - 0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, - // element: [uint64(12), uint64(24)] - 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, - // element: [uint64(14), uint64(28)] - 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, - // element: [uint64(16), uint64(32)] - 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, - // element: [uint64(18), uint64(36)] - 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, - }, - - // external collision group - id3: { - // version - 0x10, - // flag: any size + collision group - 0x2b, - - // the following encoded data is valid CBOR - - // elements (array of 3 elements) + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements 0x83, + // extra data index 1 + 0x18, 0x1, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x0, + // value: 0 + 0xd8, 0xa4, 0x0, - // level: 1 - 0x01, - - // hkeys (byte string of length 8 * 10) - 0x59, 0x00, 0x50, - // hkey: 1 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - // hkey: 3 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - // hkey: 5 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - // hkey: 7 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, - // hkey: 9 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, - // hkey: 11 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, - // hkey: 13 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, - // hkey: 15 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, - // hkey: 17 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, - // hkey: 19 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, - - // elements (array of 10 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x0a, - // element: [uint64(1), uint64(2)] - 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, - // element: [uint64(3), uint64(6)] - 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, - // element: [uint64(5), uint64(10)] - 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, - // element: [uint64(7), uint64(14)] - 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, - // element: [uint64(9), uint64(18)] - 0x82, 0xd8, 0xa4, 0x09, 
0xd8, 0xa4, 0x12, - // element: [uint64(11), uint64(22))] - 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, - // element: [uint64(13), uint64(26)] - 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, - // element: [uint64(15), uint64(30)] - 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, - // element: [uint64(17), uint64(34)] - 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, - // element: [uint64(19), uint64(38)] - 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, + // element 1: + 0x82, + // key: "b" + 0x61, 0x62, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 2 + 0x18, 0x02, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined grand child map (tag: CBORTagInlineMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 3 + 0x18, 0x3, + // inlined map slab index + 0x48, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, + // inlined grand child map elements (array of 3 elements) + 0x83, + // level 0 + 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + // value: 2 + 0xd8, 0xa4, 0x2, }, } + // Verify encoded data stored, err := storage.Encode() require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) require.Equal(t, expected[id1], stored[id1]) - require.Equal(t, expected[id2], stored[id2]) - require.Equal(t, expected[id3], stored[id3]) // Decode data to new storage storage2 := newTestPersistentStorageWithData(t, stored) @@ -3529,3345 +3492,8805 @@ func TestMapEncodeDecode(t *testing.T) { verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) }) - t.Run("pointer", func(t *testing.T) { + t.Run("root data slab, multiple levels of inlined child map of different type", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testTypeInfo{43} + childMapTypeInfo2 := testTypeInfo{44} + gchildMapTypeInfo1 := testTypeInfo{45} + gchildMapTypeInfo2 := testTypeInfo{46} + // Create and populate map in memory storage := newTestBasicStorage(t) digesterBuilder := &mockDigesterBuilder{} // Create map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - k := Uint64Value(0) - v := Uint64Value(0) + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + var k, v Value - digests := []Digest{Digest(0), Digest(1)} - digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + var gti TypeInfo + if i%2 == 0 { + gti = gchildMapTypeInfo2 + } else { + gti = gchildMapTypeInfo1 + } - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + // Create grand child map + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gti) + require.NoError(t, err) - require.Equal(t, uint64(1), m.Count()) + k = Uint64Value(i) + v = Uint64Value(i * 2) - id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // Insert element to grand child map + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + 
require.NoError(t, err) + require.Nil(t, existingStorable) - expectedNoPointer := []byte{ + var cti TypeInfo + if i%2 == 0 { + cti = childMapTypeInfo2 + } else { + cti = childMapTypeInfo1 + } - // version - 0x10, - // flag: root + map data - 0x88, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), cti) + require.NoError(t, err) - // the following encoded data is valid CBOR + k = Uint64Value(i) - // elements (array of 3 elements) - 0x83, + // Insert grand child map to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - // level: 0 - 0x00, + k = NewStringValue(string(r)) + r++ - // hkeys (byte string of length 8 * 1) - 0x59, 0x00, 0x08, - // hkey: 0 - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), uint64(0)] - 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, - } + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Verify encoded data - stored, err := storage.Encode() - require.NoError(t, err) - require.Equal(t, 1, len(stored)) - require.Equal(t, expectedNoPointer, stored[id1]) + keyValues[k] = childMap + } - // Overwrite existing value with long string - vs := NewStringValue(strings.Repeat("a", 512)) - existingStorable, err = m.Set(compare, hashInputProvider, k, vs) - require.NoError(t, err) + require.Equal(t, uint64(mapSize), parentMap.Count()) - existingValue, err := existingStorable.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, v, existingValue) + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} - expectedHasPointer := []byte{ + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version 1, flag: has inlined slab + 0x11, + // flag: root + map data + 0x88, - // version - 0x10, - // flag: root + pointer + map data - 0xc8, - // extra data (CBOR encoded array of 3 elements) - 0x83, - // type info: "map" - 0x18, 0x2A, - // count: 10 - 0x01, - // seed - 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, - // the following encoded data is valid CBOR + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info: 44 + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, - // elements (array of 3 elements) - 0x83, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info: 46 + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, - // level: 0 - 0x00, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info: 43 + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // element 3 + // inlined map extra 
data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info: 45
+ 0x18, 0x2d,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+
+ // element 0:
+ 0x82,
+ // key: "a"
+ 0x61, 0x61,
+ // value: inlined child map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined child map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xc0, 0xba, 0xe2, 0x41, 0xcf, 0xda, 0xb7, 0x84,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: inlined grand child map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 1
+ 0x18, 0x01,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined grand child map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: "b"
+ 0x61, 0x62,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 2
+ 0x18, 0x02,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x3a, 0x2d, 0x24, 0x7c, 0xca, 0xdf, 0xa0, 0x58,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: inlined grand child map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 3
+ 0x18, 0x03,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ // inlined grand child map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x68, 0x9f, 0x33, 0x33, 0x89, 0x0d, 0x89, 0xd1,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: 2
+ 0xd8, 0xa4, 0x02,
+ },
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
+
+ // Decode data to new storage
+ storage2 := newTestPersistentStorageWithData(t, stored)
+
+ // Test new map from storage2
+ decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+ require.NoError(t, err)
+
+ verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+ })
+
+ t.Run("root metadata slab, inlined child map of same type", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ childMapTypeInfo := testTypeInfo{43}
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
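+
+ // With 8 entries the parent map no longer fits in one slab: the root becomes
+ // a metadata slab (id1) with two data slabs (id2, id3), and each data slab
+ // carries its own inlined slab extra data section.
+ const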
mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9 + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version + 0x10, + // flag: root + map metadata + 0x89, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xda, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xda, + }, + id2: { + // version, flag: has inlined slab, has next slab ID + 0x13, + // flag: map data + 0x08, + + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // next slab ID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 
elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: "b"
+ 0x61, 0x62,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 1
+ 0x18, 0x01,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: 2
+ 0xd8, 0xa4, 0x02,
+
+ // element 2:
+ 0x82,
+ // key: "c"
+ 0x61, 0x63,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 2
+ 0x18, 0x02,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 2
+ 0xd8, 0xa4, 0x02,
+ // value: 4
+ 0xd8, 0xa4, 0x04,
+
+ // element 3:
+ 0x82,
+ // key: "d"
+ 0x61, 0x64,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 3
+ 0x18, 0x03,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 3
+ 0xd8, 0xa4, 0x03,
+ // value: 6
+ 0xd8, 0xa4, 0x06,
+ },
+
+ id3: {
+ // version, flag: has inlined slab
+ 0x11,
+ // flag: map data
+ 0x08,
+
+ // 4 inlined slab extra data
+ 0x84,
+ // element 0
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64,
+ // element 1
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9,
+ // element 2
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4,
+ // element 3
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 4)
+ 0x59, 0x00, 0x20,
+ // hkey: 4
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ // hkey: 5
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ // hkey: 6
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+ // hkey: 7
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+
+ // elements (array of 4 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x04,
+
+ // element 0:
+ 0x82,
+ // key: "e"
+ 0x61, 0x65,
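+ // (extra data indexes below restart at 0: each data slab
+ // encodes its own inlined slab extra data section)
+ // value: inlined map (tag: CBORTagInlinedMap)
+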
0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 4
+ 0xd8, 0xa4, 0x04,
+ // value: 8
+ 0xd8, 0xa4, 0x08,
+
+ // element 1:
+ 0x82,
+ // key: "f"
+ 0x61, 0x66,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 1
+ 0x18, 0x01,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 5
+ 0xd8, 0xa4, 0x05,
+ // value: 10
+ 0xd8, 0xa4, 0x0a,
+
+ // element 2:
+ 0x82,
+ // key: "g"
+ 0x61, 0x67,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 2
+ 0x18, 0x02,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 6
+ 0xd8, 0xa4, 0x06,
+ // value: 12
+ 0xd8, 0xa4, 0x0c,
+
+ // element 3:
+ 0x82,
+ // key: "h"
+ 0x61, 0x68,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 3
+ 0x18, 0x03,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 7
+ 0xd8, 0xa4, 0x07,
+ // value: 14
+ 0xd8, 0xa4, 0x0e,
+ },
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
+ require.Equal(t, expected[id2], stored[id2])
+ require.Equal(t, expected[id3], stored[id3])
+
+ // Decode data to new storage
+ storage2 := newTestPersistentStorageWithData(t, stored)
+
+ // Test new map from storage2
+ decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+ require.NoError(t, err)
+
+ verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+ })
+
+ t.Run("root metadata slab, inlined child map of different type", func(t *testing.T) {
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ childMapTypeInfo1 := testTypeInfo{43}
+ childMapTypeInfo2 := testTypeInfo{44}
+ childMapTypeInfo3 := testTypeInfo{45}
+ childMapTypeInfo4 := testTypeInfo{46}
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ const mapSize = 8
+ keyValues := make(map[Value]Value, mapSize)
+ r := 'a'
+ for i := uint64(0); i < mapSize; i++ {
+ var k, v Value
+
+ var ti TypeInfo
+ switch i % 4 {
+ case 0:
+ ti = childMapTypeInfo1
+ case 1:
+ ti = childMapTypeInfo2
+ case 2:
+ ti = childMapTypeInfo3
+ case 3:
+ ti = childMapTypeInfo4
+ }
+
+ // Create child map
+
childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti) + require.NoError(t, err) + + k = Uint64Value(i) + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue(string(r)) + r++ + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 10}} // inlined maps index 2-9 + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 11}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version + 0x10, + // flag: root + map metadata + 0x89, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xda, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xda, + }, + id2: { + // version, flag: has inlined slab, has next slab ID + 0x13, + // flag: map data + 0x08, + + // 4 inlined slab extra data + 0x84, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // element 1 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2c, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // element 2 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2d, + // count: 1 + 0x01, + // seed + 0x1b, 0x8d, 0x99, 0xcc, 0x54, 0xc8, 0x6b, 0xab, 0x50, + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2e, + // count: 1 + 0x01, + // seed + 0x1b, 0xeb, 0x0e, 0x1d, 0xca, 0x7a, 0x7e, 0xe1, 0x19, + + // next slab ID + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + + // element 0: + 0x82, + // key: "a" + 0x61, 0x61, + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements 
(array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: "b"
+ 0x61, 0x62,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 1
+ 0x18, 0x01,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xca, 0x96, 0x9f, 0xeb, 0x5f, 0x29, 0x4f, 0xb9,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: 2
+ 0xd8, 0xa4, 0x02,
+
+ // element 2:
+ 0x82,
+ // key: "c"
+ 0x61, 0x63,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 2
+ 0x18, 0x02,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xc4, 0x85, 0xc1, 0xd1, 0xd5, 0xc0, 0x40, 0x96,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 2
+ 0xd8, 0xa4, 0x02,
+ // value: 4
+ 0xd8, 0xa4, 0x04,
+
+ // element 3:
+ 0x82,
+ // key: "d"
+ 0x61, 0x64,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 3
+ 0x18, 0x03,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xc5, 0x75, 0x9c, 0xf7, 0x20, 0xc5, 0x65, 0xa1,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 3
+ 0xd8, 0xa4, 0x03,
+ // value: 6
+ 0xd8, 0xa4, 0x06,
+ },
+
+ id3: {
+ // version, flag: has inlined slab
+ 0x11,
+ // flag: map data
+ 0x08,
+
+ // 4 inlined slab extra data
+ 0x84,
+ // element 0
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2b,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x4f, 0xca, 0x11, 0xbd, 0x8d, 0xcb, 0xfb, 0x64,
+ // element 1
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2c,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0xdc, 0xe4, 0xe4, 0x6, 0xa9, 0x50, 0x40, 0xb9,
+ // element 2
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2d,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0x79, 0xb3, 0x45, 0x84, 0x9e, 0x66, 0xa5, 0xa4,
+ // element 3
+ // inlined map extra data
+ 0xd8, 0xf8,
+ 0x83,
+ // type info
+ 0x18, 0x2e,
+ // count: 1
+ 0x01,
+ // seed
+ 0x1b, 0xdd, 0xbd, 0x43, 0x10, 0xbe, 0x2d, 0xa9, 0xfc,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 4)
+ 0x59, 0x00, 0x20,
+ // hkey: 4
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ // hkey: 5
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+ // hkey: 6
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+ // hkey: 7
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+
+ // elements (array of 4 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x04,
+
+ // element 0:
+ 0x82,
+ // key: "e"
+ 0x61, 0x65,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
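+ // (inlined child maps share the container's slab index sequence: indexes
+ // 2-9 identify the 8 inlined maps, while id2 and id3 use indexes 10 and 11)
+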
// inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x8e, 0x5e, 0x4f, 0xf6, 0xec, 0x2f, 0x2a, 0xcf,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 4
+ 0xd8, 0xa4, 0x04,
+ // value: 8
+ 0xd8, 0xa4, 0x08,
+
+ // element 1:
+ 0x82,
+ // key: "f"
+ 0x61, 0x66,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 1
+ 0x18, 0x01,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x0d, 0x36, 0x1e, 0xfd, 0xbb, 0x5c, 0x05, 0xdf,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 5
+ 0xd8, 0xa4, 0x05,
+ // value: 10
+ 0xd8, 0xa4, 0x0a,
+
+ // element 2:
+ 0x82,
+ // key: "g"
+ 0x61, 0x67,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 2
+ 0x18, 0x02,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0x6d, 0x8e, 0x42, 0xa2, 0x00, 0xc6, 0x71, 0xf2,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 6
+ 0xd8, 0xa4, 0x06,
+ // value: 12
+ 0xd8, 0xa4, 0x0c,
+
+ // element 3:
+ 0x82,
+ // key: "h"
+ 0x61, 0x68,
+ // value: inlined map (tag: CBORTagInlinedMap)
+ 0xd8, 0xfb,
+ // array of 3 elements
+ 0x83,
+ // extra data index 3
+ 0x18, 0x03,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09,
+ // inlined map elements (array of 3 elements)
+ 0x83,
+ // level 0
+ 0x00,
+ // hkey bytes
+ 0x59, 0x00, 0x08,
+ 0xbb, 0x06, 0x37, 0x6e, 0x3a, 0x78, 0xe8, 0x6c,
+ // 1 element
+ 0x99, 0x00, 0x01,
+ 0x82,
+ // key: 7
+ 0xd8, 0xa4, 0x07,
+ // value: 14
+ 0xd8, 0xa4, 0x0e,
+ },
+ }
+
+ // Verify encoded data
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
+ require.Equal(t, expected[id2], stored[id2])
+ require.Equal(t, expected[id3], stored[id3])
+
+ // Decode data to new storage
+ storage2 := newTestPersistentStorageWithData(t, stored)
+
+ // Test new map from storage2
+ decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder)
+ require.NoError(t, err)
+
+ verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false)
+ })
+
+ t.Run("inline collision 1 level", func(t *testing.T) {
+
+ SetThreshold(256)
+ defer SetThreshold(1024)
+
+ // Create and populate map in memory
+ storage := newTestBasicStorage(t)
+
+ digesterBuilder := &mockDigesterBuilder{}
+
+ // Create map
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ const mapSize = 8
+ keyValues := make(map[Value]Value, mapSize)
+ for i := uint64(0); i < mapSize; i++ {
+ k := Uint64Value(i)
+ v := Uint64Value(i * 2)
+
+ digests := []Digest{Digest(i % 4), Digest(i)}
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: digests})
+
+ existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = v
+ }
+
+ require.Equal(t, uint64(mapSize), m.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+ // Expected serialized slab data with slab id
+ expected := map[SlabID][]byte{
+
+ // map data slab
+ id1: {
+ // version
+ 0x10,
+ // flag: root + map data
+ 0x88,
+ // extra data (CBOR encoded array of 3 elements)
+ 0x83,
+ // type info: "map"
+ 0x18, 0x2A,
+ // count: 8
+ 0x08,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 4)
+ 0x59, 0x00, 0x20,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ // hkey: 2
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // hkey: 3
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+ // elements (array of 4 elements)
+ 0x99, 0x00, 0x04,
+
+ // inline collision group corresponding to hkey 0
+ // (tag number CBORTagInlineCollisionGroup)
+ 0xd8, 0xfd,
+ // (tag content: array of 3 elements)
+ 0x83,
+
+ // level: 1
+ 0x01,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 4
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element: [uint64(0), uint64(0)]
+ 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00,
+ // element: [uint64(4), uint64(8)]
+ 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08,
+
+ // inline collision group corresponding to hkey 1
+ // (tag number CBORTagInlineCollisionGroup)
+ 0xd8, 0xfd,
+ // (tag content: array of 3 elements)
+ 0x83,
+
+ // level: 1
+ 0x01,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ // hkey: 5
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element: [uint64(1), uint64(2)]
+ 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02,
+ // element: [uint64(5), uint64(10)]
+ 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a,
+
+ // inline collision group corresponding to hkey 2
+ // (tag number CBORTagInlineCollisionGroup)
+ 0xd8, 0xfd,
+ // (tag content: array of 3 elements)
+ 0x83,
+
+ // level: 1
+ 0x01,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 2
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // hkey: 6
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element: [uint64(2), uint64(4)]
+ 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04,
+ // element: [uint64(6), uint64(12)]
+ 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c,
+
+ // inline collision group corresponding to hkey 3
+ // (tag number CBORTagInlineCollisionGroup)
+ 0xd8, 0xfd,
+ // (tag content: array of 3 elements)
+ 0x83,
+
+ // level: 1
+ 0x01,
+
+ // hkeys (byte string of length 8 * 2)
+ 0x59, 0x00, 0x10,
+ // hkey: 3
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+ // hkey: 7
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07,
+
+ // elements (array of 2 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x02,
+ // element: [uint64(3), uint64(6)]
+ 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06,
+ // element: [uint64(7), uint64(14)]
+ 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e,
+ },
+ }
+
+ stored, err := storage.Encode()
+ require.NoError(t, err)
+ require.Equal(t, len(expected), len(stored))
+ require.Equal(t, expected[id1], stored[id1])
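+
+ // Collision groups are unaffected by inlining: this slab still encodes
+ // version 0x10 with no inlined extra data section, and collision groups
+ // keep their own tags (CBORTagInlineCollisionGroup and
+ // CBORTagExternalCollisionGroup).
+ // Decode data to new storage
+ storage2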
:= newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("inline collision 2 levels", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 4), Digest(i % 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2a, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + 0x99, 0x00, 0x04, + + // inline collision group corresponding to hkey 0 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [0, 0] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + + // inline collision group corresponding to hkey 1 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 elements) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [1, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + // each element is 
encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + + // inline collision group corresponding to hkey 2 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 element) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [2, 0] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + 0x99, 0x00, 0x02, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + + // inline collision group corresponding to hkey 3 + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 1 element) + 0x99, 0x00, 0x01, + + // inline collision group corresponding to hkey [3, 1] + // (tag number CBORTagInlineCollisionGroup) + 0xd8, 0xfd, + // (tag content: array of 3 elements) + 0x83, + + // level: 2 + 0x02, + + // hkeys (empty byte string) + 0x40, + + // elements (array of 2 elements) + 0x99, 0x00, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("external collision", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 20 + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + + digests := []Digest{Digest(i % 2), Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = v + } + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // map data slab + id1: { + // version + 0x10, + // flag: root + has pointer + map data
+ 0xc8, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 20 + 0x14, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + 0x99, 0x00, 0x02, + + // external collision group corresponding to hkey 0 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + + // external collision group corresponding to hkey 1 + // (tag number CBORTagExternalCollisionGroup) + 0xd8, 0xfe, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // external collision group + id2: { + // version + 0x10, + // flag: any size + collision group + 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x59, 0x00, 0x50, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 8 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + // hkey: 10 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, + // hkey: 12 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, + // hkey: 14 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + // hkey: 16 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, + // hkey: 18 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x0a, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + // element: [uint64(2), uint64(4)] + 0x82, 0xd8, 0xa4, 0x02, 0xd8, 0xa4, 0x04, + // element: [uint64(4), uint64(8)] + 0x82, 0xd8, 0xa4, 0x04, 0xd8, 0xa4, 0x08, + // element: [uint64(6), uint64(12)] + 0x82, 0xd8, 0xa4, 0x06, 0xd8, 0xa4, 0x0c, + // element: [uint64(8), uint64(16)] + 0x82, 0xd8, 0xa4, 0x08, 0xd8, 0xa4, 0x10, + // element: [uint64(10), uint64(20)] + 0x82, 0xd8, 0xa4, 0x0a, 0xd8, 0xa4, 0x14, + // element: [uint64(12), uint64(24)] + 0x82, 0xd8, 0xa4, 0x0c, 0xd8, 0xa4, 0x18, 0x18, + // element: [uint64(14), uint64(28)] + 0x82, 0xd8, 0xa4, 0x0e, 0xd8, 0xa4, 0x18, 0x1c, + // element: [uint64(16), uint64(32)] + 0x82, 0xd8, 0xa4, 0x10, 0xd8, 0xa4, 0x18, 0x20, + // element: [uint64(18), uint64(36)] + 0x82, 0xd8, 0xa4, 0x12, 0xd8, 0xa4, 0x18, 0x24, + }, + + // external collision group + id3: { + // version + 0x10, + // flag: any size + collision group + 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 1 + 0x01, + + // hkeys (byte string of length 8 * 10) + 0x59, 0x00, 0x50, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + // hkey: 9 + 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + // hkey: 11 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, + // hkey: 13 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, + // hkey: 15 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, + // hkey: 17 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + // hkey: 19 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, + + // elements (array of 10 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x0a, + // element: [uint64(1), uint64(2)] + 0x82, 0xd8, 0xa4, 0x01, 0xd8, 0xa4, 0x02, + // element: [uint64(3), uint64(6)] + 0x82, 0xd8, 0xa4, 0x03, 0xd8, 0xa4, 0x06, + // element: [uint64(5), uint64(10)] + 0x82, 0xd8, 0xa4, 0x05, 0xd8, 0xa4, 0x0a, + // element: [uint64(7), uint64(14)] + 0x82, 0xd8, 0xa4, 0x07, 0xd8, 0xa4, 0x0e, + // element: [uint64(9), uint64(18)] + 0x82, 0xd8, 0xa4, 0x09, 0xd8, 0xa4, 0x12, + // element: [uint64(11), uint64(22)] + 0x82, 0xd8, 0xa4, 0x0b, 0xd8, 0xa4, 0x16, + // element: [uint64(13), uint64(26)] + 0x82, 0xd8, 0xa4, 0x0d, 0xd8, 0xa4, 0x18, 0x1a, + // element: [uint64(15), uint64(30)] + 0x82, 0xd8, 0xa4, 0x0f, 0xd8, 0xa4, 0x18, 0x1e, + // element: [uint64(17), uint64(34)] + 0x82, 0xd8, 0xa4, 0x11, 0xd8, 0xa4, 0x18, 0x22, + // element: [uint64(19), uint64(38)] + 0x82, 0xd8, 0xa4, 0x13, 0xd8, 0xa4, 0x18, 0x26, + }, + } + + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to child map", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child map + typeInfo2 := testTypeInfo{43} + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo2) + require.NoError(t, err) + + for i := 0; i < 2; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat("b", 22)) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childMap + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child map + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t,
uint64(mapSize), m.Count()) + + // root slab (data slab) ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // child map slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version + 0x10, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,2)] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + }, + + // map data slab + id2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2b, + // count + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey + 0x4f, 0x6a, 0x3e, 0x93, 0xdd, 0xb1, 0xbe, 0x5, + // hkey + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [1:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0xd8, 0xa4, 0x1, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [0:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0xd8, 0xa4, 0x0, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to grand child map", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := 
newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize-1; i++ { + k := Uint64Value(i) + v := Uint64Value(i * 2) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Create child map + childTypeInfo := testTypeInfo{43} + + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childTypeInfo) + require.NoError(t, err) + + // Create grand child map + gchildTypeInfo := testTypeInfo{44} + + gchildMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), gchildTypeInfo) + require.NoError(t, err) + + r := 'a' + for i := 0; i < 2; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Insert grand child map to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, Uint64Value(0), gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k := Uint64Value(mapSize - 1) + v := childMap + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child map + existingStorable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // root slab (data slab) ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // grand child map slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version, flag: has inlined slab + 0x11, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // array of inlined slab extra data + 0x81, + // element 0 + // inlined map extra data + 0xd8, 0xf8, + 0x83, + // type info + 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [0:0] + 0x82, + 0xd8, 0xa4, 0x0, + 0xd8, 0xa4, 0x0, + // element: [1:inlined map] + 0x82, + // key: 1 + 0xd8, 0xa4, 0x1, + + // value: inlined map (tag: CBORTagInlinedMap) + 0xd8, 0xfb, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined map elements (array of 3 elements) + 0x83, + // level 0 
+ 0x00, + // hkey bytes + 0x59, 0x00, 0x08, + 0x93, 0x26, 0xc4, 0xd9, 0xc6, 0xea, 0x1c, 0x45, + // 1 element + 0x99, 0x00, 0x01, + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: SlabID{...3} + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // map data slab + id2: { + // version + 0x10, + // flag: root + map data + 0x88, + + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info + 0x18, 0x2c, + // count + 0x02, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0xa, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey + 0x30, 0x43, 0xc5, 0x14, 0x8f, 0x52, 0x18, 0x43, + // hkey + 0x98, 0x0f, 0x5c, 0xdb, 0x37, 0x71, 0x6c, 0x13, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to child array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 8 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create nested array + typeInfo2 := testTypeInfo{43} + + nestedArray, err := NewArray(storage, address, typeInfo2) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = nestedArray.Append(v) + require.NoError(t, err) + } + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := nestedArray + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + 
digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert nested array + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + id3 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + id4 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 4}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // metadata slab + id1: { + // version + 0x10, + // flag: root + map meta + 0x89, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 8 + 0x08, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // child shared address + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + + // child header count + 0x00, 0x02, + // child header 1 (slab id, first key, size) + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xf6, + // child header 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0xf2, + }, + + // data slab + id2: { + // version + 0x12, + // flag: map data + 0x08, + // next slab id + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + // hkey: 2 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // hkey: 3 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:bbbbbbbbbbbbbbbbbbbbbb] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + // element: [cccccccccccccccccccccc:cccccccccccccccccccccc] + 0x82, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + 0x76, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, + // element: [dddddddddddddddddddddd:dddddddddddddddddddddd] + 0x82, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + 0x76, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, 0x64, + }, + + // data slab + id3: { + // version + 0x10, + 
// flag: has pointer + map data + 0x48, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 4) + 0x59, 0x00, 0x20, + // hkey: 4 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // hkey: 5 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // hkey: 6 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + // hkey: 7 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, + + // elements (array of 4 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x04, + // element: [eeeeeeeeeeeeeeeeeeeeee:eeeeeeeeeeeeeeeeeeeeee] + 0x82, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x76, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + // element: [ffffffffffffffffffffff:ffffffffffffffffffffff] + 0x82, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x76, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + // element: [gggggggggggggggggggggg:gggggggggggggggggggggg] + 0x82, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + 0x76, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, 0x67, + // element: [hhhhhhhhhhhhhhhhhhhhhh:SlabID(1,2,3,4,5,6,7,8,0,0,0,0,0,0,0,4)] + 0x82, + 0x76, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, 0x68, + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + }, + + // array data slab + id4: { + // version + 0x10, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 elements) + 0x81, + // type info + 0x18, 0x2b, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + require.Equal(t, expected[id3], stored[id3]) + require.Equal(t, expected[id4], stored[id4]) + + // Verify slab size in header is correct. 
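+ // (Sketch of the expected relationship, inferred from the encodings above: id2 encodes its next slab ID inline (version 0x12), so its header size equals its encoded length; id3 is the last child and omits the next-slab reference from its encoding, so its header size is its encoded length plus slabIDSize.)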
+ meta, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) + require.Equal(t, 2, len(meta.childrenHeaders)) + require.Equal(t, uint32(len(stored[id2])), meta.childrenHeaders[0].size) + require.Equal(t, uint32(len(stored[id3])+slabIDSize), meta.childrenHeaders[1].size) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to grand child array", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + r := 'a' + for i := uint64(0); i < mapSize-1; i++ { + k := NewStringValue(strings.Repeat(string(r), 22)) + v := NewStringValue(strings.Repeat(string(r), 22)) + keyValues[k] = v + + digests := []Digest{Digest(i), Digest(i * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + r++ + } + + // Create child array + childTypeInfo := testTypeInfo{43} + + childArray, err := NewArray(storage, address, childTypeInfo) + require.NoError(t, err) + + // Create grand child array + gchildTypeInfo := testTypeInfo{44} + + gchildArray, err := NewArray(storage, address, gchildTypeInfo) + require.NoError(t, err) + + for i := 0; i < 5; i++ { + v := NewStringValue(strings.Repeat("b", 22)) + err = gchildArray.Append(v) + require.NoError(t, err) + } + + // Insert grand child array to child array + err = childArray.Append(gchildArray) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat(string(r), 22)) + v := childArray + keyValues[k] = v + + digests := []Digest{Digest(mapSize - 1), Digest((mapSize - 1) * 2)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + // Insert child array to parent map + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(mapSize), m.Count()) + + // parent map root slab ID + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + // grand child array root slab ID + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 3}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + + // data slab + id1: { + // version, flag: has inlined slab + 0x11, + // flag: root + has pointer + map data + 0xc8, + + // extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // array of inlined slab extra data + 0x81, + // element 0 + // inlined array extra data + 0xd8, 0xf7, + 0x81, + // type info + 0x18, 0x2b, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is 
encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element: [aaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaa] + 0x82, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x76, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + // element: [bbbbbbbbbbbbbbbbbbbbbb:inlined array] + 0x82, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + + // value: inlined array (tag: CBORTagInlinedArray) + 0xd8, 0xfa, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined array slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined array elements (1 element) + 0x99, 0x00, 0x01, + // SlabID{...3} + 0xd8, 0xff, 0x50, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + }, + + // grand array data slab + id2: { + // version + 0x10, + // flag: root + array data + 0x80, + // extra data (CBOR encoded array of 1 element) + 0x81, + // type info + 0x18, 0x2c, + + // CBOR encoded array head (fixed size 3 byte) + 0x99, 0x00, 0x05, + // CBOR encoded array elements + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + 0x76, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, 0x62, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + require.Equal(t, expected[id2], stored[id2]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("pointer to storable slab", func(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := Uint64Value(0) + v := Uint64Value(0) + + digests := []Digest{Digest(0), Digest(1)} + digesterBuilder.On("Digest", k).Return(mockDigester{d: digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.Equal(t, uint64(1), m.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + id2 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 2}} + + expectedNoPointer := []byte{ + + // version + 0x10, + // flag: root + map data + 0x88, + // extra data (CBOR encoded array of 3 elements) + 0x83, +
// type info: "map" + 0x18, 0x2A, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 1) + 0x59, 0x00, 0x08, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x01, + // element: [uint64(0), uint64(0)] + 0x82, 0xd8, 0xa4, 0x00, 0xd8, 0xa4, 0x00, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + require.Equal(t, 1, len(stored)) + require.Equal(t, expectedNoPointer, stored[id1]) + + // Overwrite existing value with long string + vs := NewStringValue(strings.Repeat("a", 128)) + existingStorable, err = m.Set(compare, hashInputProvider, k, vs) + require.NoError(t, err) + + existingValue, err := existingStorable.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, v, existingValue) + + expectedHasPointer := []byte{ + + // version + 0x10, + // flag: root + pointer + map data + 0xc8, + // extra data (CBOR encoded array of 3 elements) + 0x83, + // type info: "map" + 0x18, 0x2A, + // count: 1 + 0x01, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, // hkeys (byte string of length 8 * 1) 0x59, 0x00, 0x08, // hkey: 0 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // elements (array of 1 elements) - // each element is encoded as CBOR array of 2 elements (key, value) - 0x99, 0x00, 0x01, - // element: [uint64(0), slab id] - 0x82, 0xd8, 0xa4, 0x00, - // (tag content: slab id) - 0xd8, 0xff, 0x50, - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // elements (array of 1 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x01, + // element: [uint64(0), slab id] + 0x82, 0xd8, 0xa4, 0x00, + // (tag content: slab id) + 0xd8, 0xff, 0x50, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + } + + expectedStorableSlab := []byte{ + // version + 0x10, + // flag: storable + no size limit + 0x3f, + // "aaaa..."
+ 0x78, 0x80, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, 0x61, + } + + stored, err = storage.Encode() + require.NoError(t, err) + require.Equal(t, 2, len(stored)) + require.Equal(t, expectedHasPointer, stored[id1]) + require.Equal(t, expectedStorableSlab, stored[id2]) + }) + + t.Run("same composite with one field", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with one field "uuid" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 1 inlined slab extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count + 0x01, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x48, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["uuid"] + 0x81, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key,
value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 1 element) + 0x81, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 1 element) + 0x81, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with two fields (same order)", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with two fields "uuid" and "amount" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue("amount") + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 1 inlined slab extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests +
0x50, 0x3b, 0xef, 0x5b, 0xe2, 0x9b, 0x8d, 0xf9, 0x65, 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["amount", "uuid"] + 0x82, 0x66, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with two fields (different order)", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed.
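+ // (Field order in an inlined composite follows the fields' digests, which are derived from the map's seed, so the same field names can encode in a different order under a different seed.)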
+ for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with two fields "uuid" and "a" + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = NewStringValue("a") + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 1 inlined slab extra data + 0x81, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + // hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 2 + 0xd8, 0xa4, 0x02, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify
encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("same composite with different number of fields", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo := testCompositeTypeInfo{43} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 2 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed. + for i := uint64(0); i < mapSize; i++ { + var k, v Value + + // Create child map, composite with field "uuid" (field "a" is added only when i == 0) + childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), childMapTypeInfo) + require.NoError(t, err) + + k = NewStringValue("uuid") + v = Uint64Value(i) + + // Insert element to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + if i == 0 { + k = NewStringValue("a") + v = Uint64Value(i * 2) + + // Insert element to child map + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + k = Uint64Value(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) + + keyValues[k] = childMap + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + + id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}} + + // Expected serialized slab data with slab id + expected := map[SlabID][]byte{ + id1: { + // version, has inlined slab + 0x11, + // flag: root + map data + 0x88, + + // slab extra data + // CBOR encoded array of 3 elements + 0x83, + // type info + 0x18, 0x2a, + // count: 2 + 0x02, + // seed + 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49, + + // 2 inlined slab extra data + 0x82, + // element 0 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 2 + 0x02, + // seed + 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd, + // composite digests + 0x50, + 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1, + 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5, + // composite keys ["a", "uuid"] + 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64, + // element 1 + // inlined composite extra data + 0xd8, 0xf9, + 0x83, + // map extra data + 0x83, + // type info + 0xd8, 0xf6, 0x18, 0x2b, + // count: 1 + 0x01, + // seed + 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a, + // composite digests + 0x48, + 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e, + // composite keys ["uuid"] + 0x81, 0x64, 0x75, 0x75, 0x69, 0x64, + + // the following encoded data is valid CBOR + + // elements (array of 3 elements) + 0x83, + + // level: 0 + 0x00, + + // hkeys (byte string of length 8 * 2) + 0x59, 0x00, 0x10, + //
hkey: 0 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // hkey: 1 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + + // elements (array of 2 elements) + // each element is encoded as CBOR array of 2 elements (key, value) + 0x99, 0x00, 0x02, + // element 0: + 0x82, + // key: 0 + 0xd8, 0xa4, 0x00, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 0 + 0xd8, 0xa4, 0x00, + // value: 0 + 0xd8, 0xa4, 0x00, + + // element 1: + 0x82, + // key: 1 + 0xd8, 0xa4, 0x01, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 1 element) + 0x81, + // value: 1 + 0xd8, 0xa4, 0x01, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) + + t.Run("different composite", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + childMapTypeInfo1 := testCompositeTypeInfo{43} + childMapTypeInfo2 := testCompositeTypeInfo{44} + + // Create and populate map in memory + storage := newTestBasicStorage(t) + + digesterBuilder := &mockDigesterBuilder{} + + // Create map + parentMap, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + const mapSize = 4 + keyValues := make(map[Value]Value, mapSize) + // fields are ordered differently because of different seed.
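+ // (Inlined composite extra data is shared only by children with the same type info, seed, and field names, so the 4 child maps below, alternating between two type infos, are expected to encode 2 inlined extra data entries.)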
+ // fields are ordered differently because of different seed.
+ for i := uint64(0); i < mapSize; i++ {
+ var ti TypeInfo
+ if i%2 == 0 {
+ ti = childMapTypeInfo1
+ } else {
+ ti = childMapTypeInfo2
+ }
+
+ var k, v Value
+
+ // Create child map, composite with two fields "uuid" and "a"
+ childMap, err := NewMap(storage, address, NewDefaultDigesterBuilder(), ti)
+ require.NoError(t, err)
+
+ k = NewStringValue("uuid")
+ v = Uint64Value(i)
+
+ // Insert element to child map
+ existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ k = NewStringValue("a")
+ v = Uint64Value(i * 2)
+
+ // Insert element to child map
+ existingStorable, err = childMap.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ k = Uint64Value(i)
+
+ digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}})
+
+ // Insert child map to parent map
+ existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+
+ keyValues[k] = childMap
+ }
+
+ require.Equal(t, uint64(mapSize), parentMap.Count())
+
+ id1 := SlabID{address: address, index: SlabIndex{0, 0, 0, 0, 0, 0, 0, 1}}
+
+ // Expected serialized slab data with slab id
+ expected := map[SlabID][]byte{
+ id1: {
+ // version, has inlined slab
+ 0x11,
+ // flag: root + map data
+ 0x88,
+
+ // slab extra data
+ // CBOR encoded array of 3 elements
+ 0x83,
+ // type info
+ 0x18, 0x2a,
+ // count: 4
+ 0x04,
+ // seed
+ 0x1b, 0x52, 0xa8, 0x78, 0x3, 0x85, 0x2c, 0xaa, 0x49,
+
+ // 2 inlined slab extra data
+ 0x82,
+ // element 0
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2b,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0xa9, 0x3a, 0x2d, 0x6f, 0x53, 0x49, 0xaa, 0xdd,
+ // composite digests
+ 0x50,
+ 0x42, 0xa5, 0xa2, 0x7f, 0xb3, 0xc9, 0x0c, 0xa1,
+ 0x4c, 0x1f, 0x34, 0x74, 0x38, 0x15, 0x64, 0xe5,
+ // composite keys ["a", "uuid"]
+ 0x82, 0x61, 0x61, 0x64, 0x75, 0x75, 0x69, 0x64,
+ // element 1
+ // inlined composite extra data
+ 0xd8, 0xf9,
+ 0x83,
+ // map extra data
+ 0x83,
+ // type info
+ 0xd8, 0xf6, 0x18, 0x2c,
+ // count: 2
+ 0x02,
+ // seed
+ 0x1b, 0x23, 0xd4, 0xf4, 0x3f, 0x19, 0xf8, 0x95, 0x0a,
+ // composite digests
+ 0x50,
+ 0x74, 0x0a, 0x02, 0xc1, 0x19, 0x6f, 0xb8, 0x9e,
+ 0xea, 0x8e, 0x6f, 0x69, 0x81, 0x19, 0x68, 0x81,
+ // composite keys ["uuid", "a"]
+ 0x82, 0x64, 0x75, 0x75, 0x69, 0x64, 0x61, 0x61,
+
+ // the following encoded data is valid CBOR
+
+ // elements (array of 3 elements)
+ 0x83,
+
+ // level: 0
+ 0x00,
+
+ // hkeys (byte string of length 8 * 4)
+ 0x59, 0x00, 0x20,
+ // hkey: 0
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ // hkey: 1
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ // hkey: 2
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // hkey: 3
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
+
+ // elements (array of 4 elements)
+ // each element is encoded as CBOR array of 2 elements (key, value)
+ 0x99, 0x00, 0x04,
+ // element 0:
+ 0x82,
+ // key: 0
+ 0xd8, 0xa4, 0x00,
+ // value: inlined composite (tag: CBORTagInlinedComposite)
+ 0xd8, 0xfc,
+ // array of 3 elements
+ 0x83,
+ // extra data index 0
+ 0x18, 0x00,
+ // inlined map slab index
+ 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+ // inlined composite elements (array of 2 elements)
+ 0x82,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+ // value: 0
+ 0xd8, 0xa4, 0x00,
+
+ // element 1:
+ 0x82,
+ // key: 1
+ 0xd8, 0xa4, 0x01,
+ // value: inlined
composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 1 + 0xd8, 0xa4, 0x01, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 2: + 0x82, + // key: 2 + 0xd8, 0xa4, 0x02, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 0 + 0x18, 0x00, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 4 + 0xd8, 0xa4, 0x04, + // value: 2 + 0xd8, 0xa4, 0x02, + + // element 3: + 0x82, + // key: 3 + 0xd8, 0xa4, 0x03, + // value: inlined composite (tag: CBORTagInlinedComposite) + 0xd8, 0xfc, + // array of 3 elements + 0x83, + // extra data index 1 + 0x18, 0x01, + // inlined map slab index + 0x48, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + // inlined composite elements (array of 2 elements) + 0x82, + // value: 3 + 0xd8, 0xa4, 0x03, + // value: 6 + 0xd8, 0xa4, 0x06, + }, + } + + // Verify encoded data + stored, err := storage.Encode() + require.NoError(t, err) + + require.Equal(t, len(expected), len(stored)) + require.Equal(t, expected[id1], stored[id1]) + + // Decode data to new storage + storage2 := newTestPersistentStorageWithData(t, stored) + + // Test new map from storage2 + decodedMap, err := NewMapWithRootID(storage2, id1, digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, decodedMap, keyValues, nil, false) + }) +} + +func TestMapEncodeDecodeRandomValues(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Create a new storage with encoded data from base storage + storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + + // Create new map from new storage + m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder) + require.NoError(t, err) + + verifyMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) +} + +func TestMapStoredValue(t *testing.T) { + + const mapSize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + keyValues := make(map[Value]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + keyValues[k] = Uint64Value(i) + i++ + } + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + rootID := m.SlabID() + + slabIterator, err := storage.SlabIterator() + require.NoError(t, err) + + for { + id, slab := slabIterator() + + if id == SlabIDUndefined { + break + } + + value, err := slab.StoredValue(storage) + + if id == rootID { + require.NoError(t, err) + + m2, ok := value.(*OrderedMap) + require.True(t, ok) + + verifyMap(t, storage, typeInfo, address, m2, keyValues, nil, false) + } else { + require.Equal(t, 1, errorCategorizationCount(err)) + var 
fatalError *FatalError
+ var notValueError *NotValueError
+ require.ErrorAs(t, err, &fatalError)
+ require.ErrorAs(t, err, &notValueError)
+ require.ErrorAs(t, fatalError, &notValueError)
+ require.Nil(t, value)
+ }
+ }
+}
+
+func TestMapPopIterate(t *testing.T) {
+
+ t.Run("empty", func(t *testing.T) {
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ digesterBuilder := newBasicDigesterBuilder()
+
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ err = storage.Commit()
+ require.NoError(t, err)
+
+ require.Equal(t, 1, storage.Count())
+
+ i := uint64(0)
+ err = m.PopIterate(func(k Storable, v Storable) {
+ i++
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(0), i)
+
+ verifyEmptyMap(t, storage, typeInfo, address, m)
+ })
+
+ t.Run("root-dataslab", func(t *testing.T) {
+ const mapSize = 10
+
+ typeInfo := testTypeInfo{42}
+ storage := newTestPersistentStorage(t)
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ digesterBuilder := newBasicDigesterBuilder()
+
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ keyValues := make(map[Value]Value, mapSize)
+ sortedKeys := make([]Value, mapSize)
+ for i := uint64(0); i < mapSize; i++ {
+ key, value := Uint64Value(i), Uint64Value(i*10)
+ sortedKeys[i] = key
+ keyValues[key] = value
+
+ existingStorable, err := m.Set(compare, hashInputProvider, key, value)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ }
+
+ require.Equal(t, uint64(mapSize), m.Count())
+
+ err = storage.Commit()
+ require.NoError(t, err)
+
+ require.Equal(t, 1, storage.Count())
+
+ sort.Stable(keysByDigest{sortedKeys, digesterBuilder})
+
+ i := mapSize
+ err = m.PopIterate(func(k, v Storable) {
+ i--
+
+ kv, err := k.StoredValue(storage)
+ require.NoError(t, err)
+ valueEqual(t, typeInfoComparator, sortedKeys[i], kv)
+
+ vv, err := v.StoredValue(storage)
+ require.NoError(t, err)
+ valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv)
+ })
+
+ require.NoError(t, err)
+ require.Equal(t, 0, i)
+
+ verifyEmptyMap(t, storage, typeInfo, address, m)
+ })
+
+ t.Run("root-metaslab", func(t *testing.T) {
+ const mapSize = 4096
+
+ r := newRand(t)
+
+ keyValues := make(map[Value]Value, mapSize)
+ sortedKeys := make([]Value, mapSize)
+ i := 0
+ for len(keyValues) < mapSize {
+ k := NewStringValue(randStr(r, 16))
+ if _, found := keyValues[k]; !found {
+ sortedKeys[i] = k
+ keyValues[k] = NewStringValue(randStr(r, 16))
+ i++
+ }
+ }
+
+ typeInfo := testTypeInfo{42}
+ address := Address{1, 2, 3, 4, 5, 6, 7, 8}
+ storage := newTestPersistentStorage(t)
+ digesterBuilder := newBasicDigesterBuilder()
+
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo)
+ require.NoError(t, err)
+
+ for k, v := range keyValues {
+ existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ require.NoError(t, err)
+ require.Nil(t, existingStorable)
+ }
+
+ err = storage.Commit()
+ require.NoError(t, err)
+
+ sort.Stable(keysByDigest{sortedKeys, digesterBuilder})
+
+ // Iterate key value pairs
+ i = len(keyValues)
+ err = m.PopIterate(func(k Storable, v Storable) {
+ i--
+
+ kv, err := k.StoredValue(storage)
+ require.NoError(t, err)
+ valueEqual(t, typeInfoComparator, sortedKeys[i], kv)
+
+ vv, err := v.StoredValue(storage)
+ require.NoError(t, err)
+ valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv)
+ })
+
+ require.NoError(t, err)
+ require.Equal(t, 0, i)
+
+ verifyEmptyMap(t, storage, typeInfo,
address, m) + }) + + t.Run("collision", func(t *testing.T) { + //MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + r := newRand(t) + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + digesterBuilder := &mockDigesterBuilder{} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value, mapSize) + sortedKeys := make([]Value, mapSize) + i := 0 + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, 16)) + + if _, found := keyValues[k]; !found { + + sortedKeys[i] = k + keyValues[k] = NewStringValue(randStr(r, 16)) + + digests := []Digest{ + Digest(i % 100), + Digest(i % 5), + } + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k]) + require.NoError(t, err) + require.Nil(t, existingStorable) + + i++ + } + } + + sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + + err = storage.Commit() + require.NoError(t, err) + + // Iterate key value pairs + i = mapSize + err = m.PopIterate(func(k Storable, v Storable) { + i-- + + kv, err := k.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + + vv, err := v.StoredValue(storage) + require.NoError(t, err) + valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) + }) + + require.NoError(t, err) + require.Equal(t, 0, i) + + verifyEmptyMap(t, storage, typeInfo, address, m) + }) +} + +func TestEmptyMap(t *testing.T) { + + t.Parallel() + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) + require.NoError(t, err) + + t.Run("get", func(t *testing.T) { + s, err := m.Get(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, s) + }) + + t.Run("remove", func(t *testing.T) { + existingKey, existingValue, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) + require.Equal(t, 1, errorCategorizationCount(err)) + var userError *UserError + var keyNotFoundError *KeyNotFoundError + require.ErrorAs(t, err, &userError) + require.ErrorAs(t, err, &keyNotFoundError) + require.ErrorAs(t, userError, &keyNotFoundError) + require.Nil(t, existingKey) + require.Nil(t, existingValue) + }) + + t.Run("iterate", func(t *testing.T) { + i := 0 + err := m.Iterate(func(k Value, v Value) (bool, error) { + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, 0, i) + }) + + t.Run("count", func(t *testing.T) { + count := m.Count() + require.Equal(t, uint64(0), count) + }) + + t.Run("type", func(t *testing.T) { + require.True(t, typeInfoComparator(typeInfo, m.Type())) + }) + + t.Run("address", func(t *testing.T) { + require.Equal(t, address, m.Address()) + }) + + // TestMapEncodeDecode/empty tests empty map encoding and decoding +} + +func TestMapFromBatchData(t *testing.T) { + + t.Run("empty", func(t *testing.T) { + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + 
require.NoError(t, err) + require.Equal(t, uint64(0), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. + copied, err := NewMapFromBatchData( + storage, + address, + NewDefaultDigesterBuilder(), + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + return iter.Next() + }) + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + verifyEmptyMap(t, storage, typeInfo, address, copied) + }) + + t.Run("root-dataslab", func(t *testing.T) { + SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + // Create a map with new storage, new address, and original map's elements. + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + + k, v, err := iter.Next() + + // Save key value pair + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, copied.SlabID(), m.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("root-metaslab", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + digesterBuilder := NewDefaultDigesterBuilder() + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("rebalance two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 10 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + 
typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("merge two data slabs", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 8 + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) + require.NoError(t, err) + require.Nil(t, storable) + } + + storable, err := m.Set( + compare, + hashInputProvider, + NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))), + ) + require.NoError(t, err) + require.Nil(t, storable) + + require.Equal(t, uint64(mapSize+1), m.Count()) + require.Equal(t, typeInfo, m.Type()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("random", func(t *testing.T) { + SetThreshold(256) + defer SetThreshold(1024) + + const mapSize = 4096 + + r := newRand(t) + + typeInfo := testTypeInfo{42} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + NewDefaultDigesterBuilder(), + typeInfo, + ) + require.NoError(t, err) + + for m.Count() < mapSize { + k := randomValue(r, int(maxInlineMapElementSize)) + v := randomValue(r, int(maxInlineMapElementSize)) + + _, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + storage := newTestPersistentStorage(t) + address := 
Address{2, 3, 4, 5, 6, 7, 8, 9} + digesterBuilder := NewDefaultDigesterBuilder() + + var sortedKeys []Value + keyValues := make(map[Value]Value, mapSize) + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("collision", func(t *testing.T) { + + const mapSize = 1024 + + SetThreshold(512) + defer SetThreshold(1024) + + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() + MaxCollisionLimitPerDigest = mapSize / 2 + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + + k, v := Uint64Value(i), Uint64Value(i*10) + + digests := make([]Digest, 2) + if i%2 == 0 { + digests[0] = 0 + } else { + digests[0] = Digest(i % (mapSize / 2)) + } + digests[1] = Digest(i) + + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + } + + require.Equal(t, uint64(mapSize), m.Count()) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + i := 0 + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + i++ + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) + + t.Run("data slab too large", func(t *testing.T) { + // Slab size must not exceed maxThreshold. + // We cannot make this problem happen after Atree Issue #193 + // was fixed by PR #194 & PR #197. This test is to catch regressions. 
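+ // Note: the keys and digests below are hardcoded rather than randomized so
+ // the slab layout that originally exceeded maxThreshold is reproduced
+ // deterministically on every run.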
+ + SetThreshold(256) + defer SetThreshold(1024) + + r := newRand(t) + + maxStringSize := int(maxInlineMapKeySize - 2) + + typeInfo := testTypeInfo{42} + + digesterBuilder := &mockDigesterBuilder{} + + m, err := NewMap( + newTestPersistentStorage(t), + Address{1, 2, 3, 4, 5, 6, 7, 8}, + digesterBuilder, + typeInfo, + ) + require.NoError(t, err) + + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}}) + + storable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue(randStr(r, maxStringSize)) + v = NewStringValue(randStr(r, maxStringSize)) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj") + v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ") + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}}) + + storable, err = m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, storable) + + iter, err := m.Iterator() + require.NoError(t, err) + + var sortedKeys []Value + keyValues := make(map[Value]Value) + + storage := newTestPersistentStorage(t) + address := Address{2, 3, 4, 5, 6, 7, 8, 9} + + copied, err := NewMapFromBatchData( + storage, + address, + digesterBuilder, + m.Type(), + compare, + hashInputProvider, + m.Seed(), + func() (Value, Value, error) { + k, v, err := iter.Next() + + if k != nil { + sortedKeys = append(sortedKeys, k) + keyValues[k] = v + } + + return k, v, err + }) + + require.NoError(t, err) + require.NotEqual(t, m.SlabID(), copied.SlabID()) + + verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + }) +} + +func TestMapNestedStorables(t *testing.T) { + + t.Run("SomeValue", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + vs := strings.Repeat("b", int(i)) + v := SomeValue{Value: NewStringValue(vs)} + + keyValues[k] = v + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) + + t.Run("Array", func(t *testing.T) { + + const mapSize = 4096 + + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for i := uint64(0); i < mapSize; i++ { + + // Create a nested array with one element + array, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) + + vs := strings.Repeat("b", int(i)) + v := SomeValue{Value: NewStringValue(vs)} + + err = array.Append(v) + require.NoError(t, err) + + // Insert nested array into map + ks := strings.Repeat("a", int(i)) + k := SomeValue{Value: NewStringValue(ks)} + + keyValues[k] = array + + existingStorable, err := m.Set(compare, 
hashInputProvider, k, array) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) + }) +} + +func TestMapMaxInlineElement(t *testing.T) { + t.Parallel() + + r := newRand(t) + maxStringSize := int(maxInlineMapKeySize - 2) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) + + keyValues := make(map[Value]Value) + for len(keyValues) < 2 { + // String length is maxInlineMapKeySize - 2 to account for string encoding overhead. + k := NewStringValue(randStr(r, maxStringSize)) + v := NewStringValue(randStr(r, maxStringSize)) + keyValues[k] = v + + _, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + } + + require.True(t, m.root.IsData()) + + // Size of root data slab with two elements (key+value pairs) of + // max inlined size is target slab size minus + // slab id size (next slab id is omitted in root slab) + require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) +} + +func TestMapString(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2]` + require.Equal(t, want, m.String()) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` + require.Equal(t, want, m.String()) + }) +} + +func TestMapSlabDump(t *testing.T) { + + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("small", func(t *testing.T) { + const mapSize = 3 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + 
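+ // Expected dump format, as produced by DumpMapSlabs: "level N, <slab kind>
+ // id:<slab id> size:<bytes> firstkey:<digest>" followed by the elements
+ // rendered as digest:key:value triples.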
want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("large", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:221 firstKey:0} {id:0x102030405060708.3 size:293 firstKey:13}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:221 firstkey:0 elements: [0:0:0 1:1:1 2:2:2 3:3:3 4:4:4 5:5:5 6:6:6 7:7:7 8:8:8 9:9:9 10:10:10 11:11:11 12:12:12]", + "level 2, MapDataSlab id:0x102030405060708.3 size:293 firstkey:13 elements: [13:13:13 14:14:14 15:15:15 16:16:16 17:17:17 18:18:18 19:19:19 20:20:20 21:21:21 22:22:22 23:23:23 24:24:24 25:25:25 26:26:26 27:27:27 28:28:28 29:29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("inline collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := []string{ + "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", + "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: [0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", + "level 2, MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("external collision", func(t *testing.T) { + const mapSize = 30 + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + want := 
[]string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]", + "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]", + "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("key overflow", func(t *testing.T) { + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapKeySize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]", + "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) + + t.Run("value overflow", func(t *testing.T) { + + digesterBuilder := &mockDigesterBuilder{} + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) + v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) + digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + want := []string{ + "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: [0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", + "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + } + dumps, err := DumpMapSlabs(m) + require.NoError(t, err) + require.Equal(t, want, dumps) + }) +} + +func TestMaxCollisionLimitPerDigest(t *testing.T) { + savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest + defer func() { + MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest + }() + + t.Run("collision limit 0", func(t *testing.T) { + const mapSize = 1024 + + SetThreshold(256) + defer SetThreshold(1024) + + // Set noncryptographic hash collision limit as 0, + // meaning no collision is allowed at first level. 
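+ // (A limit of N permits at most N+1 elements per first-level digest:
+ // the original element plus N collisions, so a limit of 0 forbids any
+ // collision.)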
+ MaxCollisionLimitPerDigest = uint32(0) + + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) + + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) } - stored, err = storage.Encode() + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, &collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } + + // Verify that no new elements exceeding collision limit inserted + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("collision limit > 0", func(t *testing.T) { + const mapSize = 1024 + + SetThreshold(256) + defer SetThreshold(1024) + + // Set noncryptographic hash collision limit as 7, + // meaning at most 8 elements in collision group per digest at first level. 
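+ // With 1024 keys hashed to digests i % 128, each first-level digest
+ // receives exactly 8 elements, which is right at the limit (7+1), so the
+ // first batch succeeds and every key in the second batch fails.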
+ MaxCollisionLimitPerDigest = uint32(7) + + digesterBuilder := &mockDigesterBuilder{} + keyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(i) + v := Uint64Value(i) + keyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) + + m, err := NewMap(storage, address, digesterBuilder, typeInfo) require.NoError(t, err) - require.Equal(t, 2, len(stored)) - require.Equal(t, expectedHasPointer, stored[id1]) + + // Insert elements within collision limits + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Insert elements exceeding collision limits + collisionKeyValues := make(map[Value]Value, mapSize) + for i := uint64(0); i < mapSize; i++ { + k := Uint64Value(mapSize + i) + v := Uint64Value(mapSize + i) + collisionKeyValues[k] = v + + digests := []Digest{Digest(i % 128)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + } + + for k, v := range collisionKeyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.Equal(t, 1, errorCategorizationCount(err)) + var fatalError *FatalError + var collisionLimitError *CollisionLimitError + require.ErrorAs(t, err, &fatalError) + require.ErrorAs(t, err, &collisionLimitError) + require.ErrorAs(t, fatalError, &collisionLimitError) + require.Nil(t, existingStorable) + } + + // Verify that no new elements exceeding collision limit inserted + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + + // Update elements within collision limits + for k := range keyValues { + v := Uint64Value(0) + keyValues[k] = v + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } + + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) }) } -func TestMapEncodeDecodeRandomValues(t *testing.T) { +func TestMapLoadedValueIterator(t *testing.T) { SetThreshold(256) defer SetThreshold(1024) - r := newRand(t) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, keyValues := testMapSetRemoveRandomValues(t, r, storage, typeInfo, address) + t.Run("empty", func(t *testing.T) { + storage := newTestPersistentStorage(t) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + digesterBuilder := &mockDigesterBuilder{} - // Create a new storage with encoded data from base storage - storage2 := newTestPersistentStorageWithBaseStorage(t, storage.baseStorage) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // Create new map from new storage - m2, err := NewMapWithRootID(storage2, m.SlabID(), m.digesterBuilder) - require.NoError(t, err) + // parent map: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - verifyMap(t, storage2, typeInfo, address, m2, keyValues, nil, false) -} + verifyMapLoadedElements(t, m, nil) + }) -func TestMapStoredValue(t *testing.T) { + t.Run("root data slab with simple values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithSimpleValues( + t, + storage, + 
address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + require.Equal(t, 1, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + verifyMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + verifyMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values in collision group", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 collision groups, 2 elements in each group. + const mapSize = 6 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + verifyMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values in external collision group", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + // Create parent map with 3 external collision group, 4 elements in the group. + const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) + + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + verifyMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) + + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + verifyMapLoadedElements(t, m, values) + + // Unload composite element from front to back. 
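+ // Removing a child's root slab from storage simulates the child being
+ // evicted from the cache; verifyMapLoadedElements then expects only the
+ // values that remain loaded (values[i+1:], since the prefix is unloaded).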
+ for i := 0; i < len(values); i++ {
+ v := values[i][1]
- const mapSize = 4096
+ nestedArray, ok := v.(*Array)
+ require.True(t, ok)
- r := newRand(t)
+ err := storage.Remove(nestedArray.SlabID())
+ require.NoError(t, err)
- typeInfo := testTypeInfo{42}
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- storage := newTestPersistentStorage(t)
+ expectedValues := values[i+1:]
+ verifyMapLoadedElements(t, m, expectedValues)
+ }
+ })
- keyValues := make(map[Value]Value, mapSize)
- i := 0
- for len(keyValues) < mapSize {
- k := NewStringValue(randStr(r, 16))
- keyValues[k] = Uint64Value(i)
- i++
- }
+ t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) {
+ storage := newTestPersistentStorage(t)
- m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo)
- require.NoError(t, err)
+ const mapSize = 3
+ m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize)
- for k, v := range keyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
+ // parent map: 1 root data slab
+ // long string keys: 1 storable slab for each
+ require.Equal(t, 1+mapSize, len(storage.deltas))
+ require.Equal(t, 0, getMapMetaDataSlabCount(storage))
- rootID := m.SlabID()
+ verifyMapLoadedElements(t, m, values)
- slabIterator, err := storage.SlabIterator()
- require.NoError(t, err)
+ // Unload external key from front to back.
+ for i := 0; i < len(values); i++ {
+ k := values[i][0]
- for {
- id, slab := slabIterator()
+ s, ok := k.(StringValue)
+ require.True(t, ok)
- if id == SlabIDUndefined {
- break
- }
+ // Find storage id for StringValue s.
+ var keyID SlabID
+ for id, slab := range storage.deltas {
+ if sslab, ok := slab.(*StorableSlab); ok {
+ if other, ok := sslab.storable.(StringValue); ok {
+ if s.str == other.str {
+ keyID = id
+ break
+ }
+ }
+ }
+ }
- value, err := slab.StoredValue(storage)
+ require.NoError(t, keyID.Valid())
- if id == rootID {
+ err := storage.Remove(keyID)
 require.NoError(t, err)
- m2, ok := value.(*OrderedMap)
- require.True(t, ok)
-
- verifyMap(t, storage, typeInfo, address, m2, keyValues, nil, false)
- } else {
- require.Equal(t, 1, errorCategorizationCount(err))
- var fatalError *FatalError
- var notValueError *NotValueError
- require.ErrorAs(t, err, &fatalError)
- require.ErrorAs(t, err, &notValueError)
- require.ErrorAs(t, fatalError, &notValueError)
- require.Nil(t, value)
+ expectedValues := values[i+1:]
+ verifyMapLoadedElements(t, m, expectedValues)
 }
- }
-}
-
-func TestMapPopIterate(t *testing.T) {
+ })
- t.Run("empty", func(t *testing.T) {
- typeInfo := testTypeInfo{42}
+ t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) {
 storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- digesterBuilder := newBasicDigesterBuilder()
-
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ // Create parent map with 3 collision groups, 2 elements in each group.
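+ // First-level digest i/2 makes consecutive pairs collide, while the
+ // second-level digest i disambiguates elements within each inline
+ // collision group.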
+ const mapSize = 6 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - err = storage.Commit() - require.NoError(t, err) + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - require.Equal(t, 1, storage.Count()) + verifyMapLoadedElements(t, m, values) - i := uint64(0) - err = m.PopIterate(func(k Storable, v Storable) { - i++ - }) - require.NoError(t, err) - require.Equal(t, uint64(0), i) + // Unload composite element from front to back. + for i := 0; i < len(values); i++ { + v := values[i][1] - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + nestedArray, ok := v.(*Array) + require.True(t, ok) - t.Run("root-dataslab", func(t *testing.T) { - const mapSize = 10 + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) - typeInfo := testTypeInfo{42} + expectedValues := values[i+1:] + verifyMapLoadedElements(t, m, expectedValues) + } + }) + + t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := newBasicDigesterBuilder() - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - for i := uint64(0); i < mapSize; i++ { - key, value := Uint64Value(i), Uint64Value(i*10) - sortedKeys[i] = key - keyValues[key] = value + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - existingStorable, err := m.Set(compare, hashInputProvider, key, value) + verifyMapLoadedElements(t, m, values) + + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i][1] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, existingStorable) + + expectedValues := values[i+1:] + verifyMapLoadedElements(t, m, expectedValues) } + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - err = storage.Commit() - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. 
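+ // First-level digest i/4 groups four elements per digest; each of the 3
+ // groups is stored as an external collision slab, which accounts for the
+ // 3 extra deltas asserted below.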
+ const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - require.Equal(t, 1, storage.Count()) + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + verifyMapLoadedElements(t, m, values) - i := mapSize - err = m.PopIterate(func(k, v Storable) { - i-- + // Unload external collision group slab from front to back - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() }) - require.NoError(t, err) - require.Equal(t, 0, i) + for i, id := range externalCollisionSlabIDs { + err := storage.Remove(id) + require.NoError(t, err) - verifyEmptyMap(t, storage, typeInfo, address, m) + expectedValues := values[i*4+4:] + verifyMapLoadedElements(t, m, expectedValues) + } }) - t.Run("root-metaslab", func(t *testing.T) { - const mapSize = 4096 - - r := newRand(t) + t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) - if _, found := keyValues[k]; !found { - sortedKeys[i] = k - keyValues[k] = NewStringValue(randStr(r, 16)) - i++ - } - } + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) - digesterBuilder := newBasicDigesterBuilder() + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + verifyMapLoadedElements(t, m, values) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + // Unload composite element from back to front. 
+ for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] + + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, existingStorable) - } - err = storage.Commit() - require.NoError(t, err) + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Iterate key value pairs - i = len(keyValues) - err = m.PopIterate(func(k Storable, v Storable) { - i-- + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + // parent map: 1 root data slab + // long string keys: 1 storable slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + verifyMapLoadedElements(t, m, values) - require.NoError(t, err) - require.Equal(t, 0, i) + // Unload composite element from front to back. + for i := len(values) - 1; i >= 0; i-- { + k := values[i][0] - verifyEmptyMap(t, storage, typeInfo, address, m) - }) + s, ok := k.(StringValue) + require.True(t, ok) - t.Run("collision", func(t *testing.T) { - //MetaDataSlabCount:1 DataSlabCount:13 CollisionDataSlabCount:100 + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } + } - const mapSize = 1024 + require.NoError(t, keyID.Valid()) - SetThreshold(512) - defer SetThreshold(1024) + err := storage.Remove(keyID) + require.NoError(t, err) - r := newRand(t) + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - digesterBuilder := &mockDigesterBuilder{} + t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { storage := newTestPersistentStorage(t) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) - - keyValues := make(map[Value]Value, mapSize) - sortedKeys := make([]Value, mapSize) - i := 0 - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, 16)) + // Create parent map with 3 collision groups, 2 elements in each group. 
+ const mapSize = 6 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - if _, found := keyValues[k]; !found { + // parent map: 1 root data slab + // composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - sortedKeys[i] = k - keyValues[k] = NewStringValue(randStr(r, 16)) + verifyMapLoadedElements(t, m, values) - digests := []Digest{ - Digest(i % 100), - Digest(i % 5), - } + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + nestedArray, ok := v.(*Array) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, keyValues[k]) - require.NoError(t, err) - require.Nil(t, existingStorable) + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) - i++ - } + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) } + }) - sort.Stable(keysByDigest{sortedKeys, digesterBuilder}) + t.Run("root data slab with composite values in external collision group, unload value from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - err = storage.Commit() - require.NoError(t, err) + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - // Iterate key value pairs - i = mapSize - err = m.PopIterate(func(k Storable, v Storable) { - i-- + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - kv, err := k.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, sortedKeys[i], kv) + verifyMapLoadedElements(t, m, values) - vv, err := v.StoredValue(storage) - require.NoError(t, err) - valueEqual(t, typeInfoComparator, keyValues[sortedKeys[i]], vv) - }) + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] - require.NoError(t, err) - require.Equal(t, 0, i) + nestedArray, ok := v.(*Array) + require.True(t, ok) - verifyEmptyMap(t, storage, typeInfo, address, m) - }) -} + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) -func TestEmptyMap(t *testing.T) { + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - t.Parallel() + t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Create parent map with 3 external collision groups, 4 elements in the group. 
+ const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - m, err := NewMap(storage, address, NewDefaultDigesterBuilder(), typeInfo) - require.NoError(t, err) + // parent map: 1 root data slab, 3 external collision group + // composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - t.Run("get", func(t *testing.T) { - s, err := m.Get(compare, hashInputProvider, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var keyNotFoundError *KeyNotFoundError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &keyNotFoundError) - require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, s) - }) + verifyMapLoadedElements(t, m, values) - t.Run("remove", func(t *testing.T) { - existingKey, existingValue, err := m.Remove(compare, hashInputProvider, Uint64Value(0)) - require.Equal(t, 1, errorCategorizationCount(err)) - var userError *UserError - var keyNotFoundError *KeyNotFoundError - require.ErrorAs(t, err, &userError) - require.ErrorAs(t, err, &keyNotFoundError) - require.ErrorAs(t, userError, &keyNotFoundError) - require.Nil(t, existingKey) - require.Nil(t, existingValue) - }) + // Unload external slabs from back to front + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) + } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - t.Run("iterate", func(t *testing.T) { - i := 0 - err := m.Iterate(func(k Value, v Value) (bool, error) { - i++ - return true, nil + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() }) - require.NoError(t, err) - require.Equal(t, 0, i) - }) - t.Run("count", func(t *testing.T) { - count := m.Count() - require.Equal(t, uint64(0), count) + for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- { + err := storage.Remove(externalCollisionSlabIDs[i]) + require.NoError(t, err) + + expectedValues := values[:i*4] + verifyMapLoadedElements(t, m, expectedValues) + } }) - t.Run("type", func(t *testing.T) { - require.True(t, typeInfoComparator(typeInfo, m.Type())) - }) + t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - t.Run("address", func(t *testing.T) { - require.Equal(t, address, m.Address()) - }) + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - // TestMapEncodeDecode/empty tests empty map encoding and decoding -} + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) -func TestMapFromBatchData(t *testing.T) { + verifyMapLoadedElements(t, m, values) - t.Run("empty", func(t *testing.T) { - typeInfo := testTypeInfo{42} + // Unload value in the middle + unloadValueIndex := 1 - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 
7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) - require.Equal(t, uint64(0), m.Count()) + v := values[unloadValueIndex][1] - iter, err := m.Iterator() + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) + }) + + t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - // Create a map with new storage, new address, and original map's elements. - copied, err := NewMapFromBatchData( - storage, - address, - NewDefaultDigesterBuilder(), - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - return iter.Next() - }) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), m.SlabID()) + const mapSize = 3 + m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) - verifyEmptyMap(t, storage, typeInfo, address, copied) - }) + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - t.Run("root-dataslab", func(t *testing.T) { - SetThreshold(1024) + verifyMapLoadedElements(t, m, values) - const mapSize = 10 + // Unload key in the middle. + unloadValueIndex := 1 - typeInfo := testTypeInfo{42} + k := values[unloadValueIndex][0] - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) + s, ok := k.(StringValue) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) + // Find storage id for StringValue s. + var keyID SlabID + for id, slab := range storage.deltas { + if sslab, ok := slab.(*StorableSlab); ok { + if other, ok := sslab.storable.(StringValue); ok { + if s.str == other.str { + keyID = id + break + } + } + } } - require.Equal(t, uint64(mapSize), m.Count()) + require.NoError(t, keyID.Valid()) - iter, err := m.Iterator() + err := storage.Remove(keyID) require.NoError(t, err) - var sortedKeys []Value - keyValues := make(map[Value]Value) + copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) + }) + t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - digesterBuilder := NewDefaultDigesterBuilder() - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - // Create a map with new storage, new address, and original map's elements. - copied, err := NewMapFromBatchData( + // Create parent map with 3 collision groups, 2 elements in each group. 
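+ // Digest(i / 2) as the first digest puts every 2 consecutive elements into
+ // the same first-level collision group (3 groups of 2 for mapSize 6).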
+ const mapSize = 6 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - - k, v, err := iter.Next() + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, + ) - // Save key value pair - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - return k, v, err - }) + verifyMapLoadedElements(t, m, values) - require.NoError(t, err) - require.NotEqual(t, copied.SlabID(), m.SlabID()) + // Unload composite element in the middle + for _, unloadValueIndex := range []int{1, 3, 5} { + v := values[unloadValueIndex][1] - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) + nestedArray, ok := v.(*Array) + require.True(t, ok) - t.Run("root-metaslab", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + } - const mapSize = 4096 + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + } + verifyMapLoadedElements(t, m, expectedValues) + }) - typeInfo := testTypeInfo{42} + t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), + // Create parent map with 3 external collision groups, 4 elements in the group. + const mapSize = 12 + m, values := createMapWithCompositeValues( + t, + storage, + address, typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, ) - require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - require.Equal(t, uint64(mapSize), m.Count()) + verifyMapLoadedElements(t, m, values) - iter, err := m.Iterator() - require.NoError(t, err) + // Unload composite value in the middle. + for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { + v := values[unloadValueIndex][1] - var sortedKeys []Value - keyValues := make(map[Value]Value) + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) + } + + expectedValues := [][2]Value{ + values[0], + values[2], + values[4], + values[6], + values[8], + values[10], + } + verifyMapLoadedElements(t, m, expectedValues) + }) + t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - digesterBuilder := NewDefaultDigesterBuilder() - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - copied, err := NewMapFromBatchData( + // Create parent map with 3 external collision groups, 4 elements in the group. 
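+ // Digest(i / 4) as the first digest puts every 4 consecutive elements under
+ // the same first-level digest; with 4 composite values per group, each
+ // collision group is expected to be stored as an external collision slab.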
+ const mapSize = 12 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, + ) - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v + // parent map: 1 root data slab, 3 external collision group + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+3+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + + verifyMapLoadedElements(t, m, values) + + // Unload external slabs in the middle. + var externalCollisionSlabIDs []SlabID + for id, slab := range storage.deltas { + if dataSlab, ok := slab.(*MapDataSlab); ok { + if dataSlab.collisionGroup { + externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) } + } + } + require.Equal(t, 3, len(externalCollisionSlabIDs)) - return k, v, err - }) + sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { + a := externalCollisionSlabIDs[i] + b := externalCollisionSlabIDs[j] + if a.address == b.address { + return a.IndexAsUint64() < b.IndexAsUint64() + } + return a.AddressAsUint64() < b.AddressAsUint64() + }) + id := externalCollisionSlabIDs[1] + err := storage.Remove(id) require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) - - t.Run("rebalance two data slabs", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + copy(values[4:], values[8:]) + values = values[:8] - const mapSize = 10 + verifyMapLoadedElements(t, m, values) + }) - typeInfo := testTypeInfo{42} + t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), + const mapSize = 3 + m, values := createMapWithCompositeValues( + t, + storage, + address, typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, ) - require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10)) - require.NoError(t, err) - require.Nil(t, storable) - } + // parent map: 1 root data slab + // nested composite elements: 1 root data slab for each + require.Equal(t, 1+mapSize, len(storage.deltas)) + require.Equal(t, 0, getMapMetaDataSlabCount(storage)) - k := NewStringValue(strings.Repeat("a", int(maxInlineMapElementSize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))) - storable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + verifyMapLoadedElements(t, m, values) - require.Equal(t, uint64(mapSize+1), m.Count()) + i := 0 + err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { + // At this point, iterator returned first element (v). - iter, err := m.Iterator() - require.NoError(t, err) + // Remove all other nested composite elements (except first element) from storage. 
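+ // This exercises IterateLoadedValues when values are unloaded mid-iteration:
+ // the iterator is expected to stop after the first element without error.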
+ for _, element := range values[1:] {
+ value := element[1]
+ nestedArray, ok := value.(*Array)
+ require.True(t, ok)
- var sortedKeys []Value
- keyValues := make(map[Value]Value)
+ err := storage.Remove(nestedArray.SlabID())
+ require.NoError(t, err)
+ }
- storage := newTestPersistentStorage(t)
- address := Address{2, 3, 4, 5, 6, 7, 8, 9}
- digesterBuilder := NewDefaultDigesterBuilder()
+ require.Equal(t, 0, i)
+ valueEqual(t, typeInfoComparator, values[0][0], k)
+ valueEqual(t, typeInfoComparator, values[0][1], v)
+ i++
+ return true, nil
+ })
- copied, err := NewMapFromBatchData(
- storage,
- address,
- digesterBuilder,
- m.Type(),
- compare,
- hashInputProvider,
- m.Seed(),
- func() (Value, Value, error) {
- k, v, err := iter.Next()
+ require.NoError(t, err)
+ require.Equal(t, 1, i) // Only first element is iterated because other elements are removed during iteration.
+ })
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
- }
+ t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) {
+ const mapSize = 3
- return k, v, err
- })
+ // Create a map with nested composite value at specified index
+ for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ {
+ storage := newTestPersistentStorage(t)
- require.NoError(t, err)
- require.NotEqual(t, m.SlabID(), copied.SlabID())
+ m, values := createMapWithSimpleAndCompositeValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ nestedCompositeIndex,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false)
- })
+ // parent map: 1 root data slab
+ // composite element: 1 root data slab
+ require.Equal(t, 2, len(storage.deltas))
+ require.Equal(t, 0, getMapMetaDataSlabCount(storage))
- t.Run("merge two data slabs", func(t *testing.T) {
- SetThreshold(256)
- defer SetThreshold(1024)
+ verifyMapLoadedElements(t, m, values)
- const mapSize = 8
+ // Unload composite value
+ v := values[nestedCompositeIndex][1].(*Array)
- typeInfo := testTypeInfo{42}
+ err := storage.Remove(v.SlabID())
+ require.NoError(t, err)
- m, err := NewMap(
- newTestPersistentStorage(t),
- Address{1, 2, 3, 4, 5, 6, 7, 8},
- NewDefaultDigesterBuilder(),
- typeInfo,
- )
- require.NoError(t, err)
+ copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
+ values = values[:len(values)-1]
- for i := uint64(0); i < mapSize; i++ {
- storable, err := m.Set(compare, hashInputProvider, Uint64Value(i), Uint64Value(i*10))
- require.NoError(t, err)
- require.Nil(t, storable)
+ verifyMapLoadedElements(t, m, values)
 }
+ })
- storable, err := m.Set(
- compare,
- hashInputProvider,
- NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))),
- NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize-2))),
- )
- require.NoError(t, err)
- require.Nil(t, storable)
+ t.Run("root metadata slab with simple values", func(t *testing.T) {
+ storage := newTestPersistentStorage(t)
- require.Equal(t, uint64(mapSize+1), m.Count())
- require.Equal(t, typeInfo, m.Type())
+ const mapSize = 20
+ m, values := createMapWithSimpleValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- iter, err := m.Iterator()
- require.NoError(t, err)
+ // parent map (2 levels): 1 root metadata slab, 3 data slabs
+ require.Equal(t, 4, len(storage.deltas))
+ require.Equal(t, 1, getMapMetaDataSlabCount(storage))
- var sortedKeys []Value
- keyValues := 
make(map[Value]Value) + verifyMapLoadedElements(t, m, values) + }) + t.Run("root metadata slab with composite values", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() - copied, err := NewMapFromBatchData( + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() - - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } - - return k, v, err - }) + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) + verifyMapLoadedElements(t, m, values) }) - t.Run("random", func(t *testing.T) { - SetThreshold(256) - defer SetThreshold(1024) + t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - const mapSize = 4096 + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - r := newRand(t) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values : 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - typeInfo := testTypeInfo{42} + verifyMapLoadedElements(t, m, values) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - NewDefaultDigesterBuilder(), - typeInfo, - ) - require.NoError(t, err) + // Unload composite element from front to back + for i := 0; i < len(values); i++ { + v := values[i][1] - for m.Count() < mapSize { - k := randomValue(r, int(maxInlineMapElementSize)) - v := randomValue(r, int(maxInlineMapElementSize)) + nestedArray, ok := v.(*Array) + require.True(t, ok) - _, err = m.Set(compare, hashInputProvider, k, v) + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - } - require.Equal(t, uint64(mapSize), m.Count()) - - iter, err := m.Iterator() - require.NoError(t, err) + expectedValues := values[i+1:] + verifyMapLoadedElements(t, m, expectedValues) + } + }) + t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} - digesterBuilder := NewDefaultDigesterBuilder() - - var sortedKeys []Value - keyValues := make(map[Value]Value, mapSize) - copied, err := NewMapFromBatchData( + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, storage, address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() - - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } - - return k, v, err - }) - - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - verifyMap(t, storage, typeInfo, address, copied, keyValues, 
sortedKeys, false) - }) + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - t.Run("collision", func(t *testing.T) { + verifyMapLoadedElements(t, m, values) - const mapSize = 1024 + // Unload composite element from back to front + for i := len(values) - 1; i >= 0; i-- { + v := values[i][1] - SetThreshold(512) - defer SetThreshold(1024) + nestedArray, ok := v.(*Array) + require.True(t, ok) - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() - MaxCollisionLimitPerDigest = mapSize / 2 + err := storage.Remove(nestedArray.SlabID()) + require.NoError(t, err) - typeInfo := testTypeInfo{42} + expectedValues := values[:i] + verifyMapLoadedElements(t, m, expectedValues) + } + }) - digesterBuilder := &mockDigesterBuilder{} + t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) { + storage := newTestPersistentStorage(t) - m, err := NewMap( - newTestPersistentStorage(t), - Address{1, 2, 3, 4, 5, 6, 7, 8}, - digesterBuilder, + const mapSize = 20 + m, values := createMapWithCompositeValues( + t, + storage, + address, typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, ) - require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 4+mapSize, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - k, v := Uint64Value(i), Uint64Value(i*10) + verifyMapLoadedElements(t, m, values) - digests := make([]Digest, 2) - if i%2 == 0 { - digests[0] = 0 - } else { - digests[0] = Digest(i % (mapSize / 2)) - } - digests[1] = Digest(i) + // Unload composite element in the middle + for _, index := range []int{4, 14} { - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + v := values[index][1] - storable, err := m.Set(compare, hashInputProvider, k, v) + nestedArray, ok := v.(*Array) + require.True(t, ok) + + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, storable) + + copy(values[index:], values[index+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) } + }) - require.Equal(t, uint64(mapSize), m.Count()) + t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { + const mapSize = 20 - iter, err := m.Iterator() - require.NoError(t, err) + // Create a map with nested composite value at specified index + for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { + storage := newTestPersistentStorage(t) - var sortedKeys []Value - keyValues := make(map[Value]Value) + m, values := createMapWithSimpleAndCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + nestedCompositeIndex, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + // parent map (2 levels): 1 root metadata slab, 3 data slabs + // composite values: 1 root data slab for each + require.Equal(t, 5, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - i := 0 - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() 
(Value, Value, error) {
- k, v, err := iter.Next()
+ verifyMapLoadedElements(t, m, values)
- if k != nil {
- sortedKeys = append(sortedKeys, k)
- keyValues[k] = v
- }
+ v := values[nestedCompositeIndex][1].(*Array)
- i++
- return k, v, err
- })
+ err := storage.Remove(v.SlabID())
+ require.NoError(t, err)
- require.NoError(t, err)
- require.NotEqual(t, m.SlabID(), copied.SlabID())
+ copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
+ values = values[:len(values)-1]
- verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false)
+ verifyMapLoadedElements(t, m, values)
+ }
 })
- t.Run("data slab too large", func(t *testing.T) {
- // Slab size must not exceed maxThreshold.
- // We cannot make this problem happen after Atree Issue #193
- // was fixed by PR #194 & PR #197. This test is to catch regressions.
+ t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) {
+ storage := newTestPersistentStorage(t)
- SetThreshold(256)
- defer SetThreshold(1024)
+ const mapSize = 20
- r := newRand(t)
+ m, values := createMapWithSimpleValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- maxStringSize := int(maxInlineMapKeySize - 2)
+ // parent map (2 levels): 1 root metadata slab, 3 data slabs
+ require.Equal(t, 4, len(storage.deltas))
+ require.Equal(t, 1, getMapMetaDataSlabCount(storage))
- typeInfo := testTypeInfo{42}
+ verifyMapLoadedElements(t, m, values)
- digesterBuilder := &mockDigesterBuilder{}
+ rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab)
+ require.True(t, ok)
- m, err := NewMap(
- newTestPersistentStorage(t),
- Address{1, 2, 3, 4, 5, 6, 7, 8},
- digesterBuilder,
- typeInfo,
- )
- require.NoError(t, err)
+ // Unload data slabs from front to back
+ for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ {
- k := NewStringValue(randStr(r, maxStringSize))
- v := NewStringValue(randStr(r, maxStringSize))
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3881892766069237908}})
+ childHeader := rootMetaDataSlab.childrenHeaders[i]
- storable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, storable)
+ // Get data slab element count before unloading it from storage.
+ // Element count isn't in the header. 
+ mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) + require.True(t, ok) - k = NewStringValue(randStr(r, maxStringSize)) - v = NewStringValue(randStr(r, maxStringSize)) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3882976639190041664}}) + count := mapDataSlab.elements.Count() - storable, err = m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - k = NewStringValue("zFKUYYNfIfJCCakcDuIEHj") - v = NewStringValue("EZbaCxxjDtMnbRlXJMgfHnZ") - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{3883321011075439822}}) + values = values[count:] - storable, err = m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, storable) + verifyMapLoadedElements(t, m, values) + } + }) - iter, err := m.Iterator() - require.NoError(t, err) + t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - var sortedKeys []Value - keyValues := make(map[Value]Value) + const mapSize = 20 + + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) + + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - storage := newTestPersistentStorage(t) - address := Address{2, 3, 4, 5, 6, 7, 8, 9} + verifyMapLoadedElements(t, m, values) - copied, err := NewMapFromBatchData( - storage, - address, - digesterBuilder, - m.Type(), - compare, - hashInputProvider, - m.Seed(), - func() (Value, Value, error) { - k, v, err := iter.Next() + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - if k != nil { - sortedKeys = append(sortedKeys, k) - keyValues[k] = v - } + // Unload data slabs from back to front + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { - return k, v, err - }) + childHeader := rootMetaDataSlab.childrenHeaders[i] - require.NoError(t, err) - require.NotEqual(t, m.SlabID(), copied.SlabID()) + // Get data slab element count before unload it from storage + // Element count isn't in the header. 
+ mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) + require.True(t, ok) - verifyMap(t, storage, typeInfo, address, copied, keyValues, sortedKeys, false) - }) -} + count := mapDataSlab.elements.Count() -func TestMapNestedStorables(t *testing.T) { + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - t.Run("SomeValue", func(t *testing.T) { + values = values[:len(values)-int(count)] - const mapSize = 4096 + verifyMapLoadedElements(t, m, values) + } + }) - typeInfo := testTypeInfo{42} + t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + const mapSize = 20 - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - ks := strings.Repeat("a", int(i)) - k := SomeValue{Value: NewStringValue(ks)} + // parent map (2 levels): 1 root metadata slab, 3 data slabs + require.Equal(t, 4, len(storage.deltas)) + require.Equal(t, 1, getMapMetaDataSlabCount(storage)) - vs := strings.Repeat("b", int(i)) - v := SomeValue{Value: NewStringValue(vs)} + verifyMapLoadedElements(t, m, values) - keyValues[k] = v + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) - }) + index := 1 + childHeader := rootMetaDataSlab.childrenHeaders[index] - t.Run("Array", func(t *testing.T) { + // Get element count from previous data slab + mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab) + require.True(t, ok) - const mapSize = 4096 + countAtIndex0 := mapDataSlab.elements.Count() - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Get element count from slab to be unloaded + mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab) + require.True(t, ok) - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + countAtIndex1 := mapDataSlab.elements.Count() - keyValues := make(map[Value]Value) - for i := uint64(0); i < mapSize; i++ { + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - // Create a nested array with one element - array, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) + values = values[:m.Count()-uint64(countAtIndex1)] - vs := strings.Repeat("b", int(i)) - v := SomeValue{Value: NewStringValue(vs)} + verifyMapLoadedElements(t, m, values) + }) - err = array.Append(v) - require.NoError(t, err) + t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Insert nested array into map - ks := strings.Repeat("a", int(i)) - k := SomeValue{Value: NewStringValue(ks)} + const mapSize = 200 - keyValues[k] = array + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, 
+ ) - existingStorable, err := m.Set(compare, hashInputProvider, k, array) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, true) - }) -} + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) -func TestMapMaxInlineElement(t *testing.T) { - t.Parallel() + // Unload non-root metadata slabs from front to back. + for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { - r := newRand(t) - maxStringSize := int(maxInlineMapKeySize - 2) - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childHeader := rootMetaDataSlab.childrenHeaders[i] - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - keyValues := make(map[Value]Value) - for len(keyValues) < 2 { - // String length is maxInlineMapKeySize - 2 to account for string encoding overhead. - k := NewStringValue(randStr(r, maxStringSize)) - v := NewStringValue(randStr(r, maxStringSize)) - keyValues[k] = v + // Use firstKey to deduce number of elements in slab. + var expectedValues [][2]Value + if i < len(rootMetaDataSlab.childrenHeaders)-1 { + nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1] + expectedValues = values[int(nextChildHeader.firstKey):] + } - _, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - } + verifyMapLoadedElements(t, m, expectedValues) + } + }) - require.True(t, m.root.IsData()) + t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { + storage := newTestPersistentStorage(t) - // Size of root data slab with two elements (key+value pairs) of - // max inlined size is target slab size minus - // slab id size (next slab id is omitted in root slab) - require.Equal(t, targetThreshold-slabIDSize, uint64(m.root.Header().size)) + const mapSize = 200 - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) -} + m, values := createMapWithSimpleValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) -func TestMapString(t *testing.T) { + // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs + require.Equal(t, 4, getMapMetaDataSlabCount(storage)) - SetThreshold(256) - defer SetThreshold(1024) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - t.Run("small", func(t *testing.T) { - const mapSize = 3 + // Unload non-root metadata slabs from back to front. + for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + childHeader := rootMetaDataSlab.childrenHeaders[i] - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + err := storage.Remove(childHeader.slabID) + require.NoError(t, err) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + // Use firstKey to deduce number of elements in slabs. 
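+ // (Digests here are Digest(i), so a child's firstKey equals the index in
+ // values of its first element.)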
+ values = values[:childHeader.firstKey] - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + verifyMapLoadedElements(t, m, values) } - - want := `[0:0 1:1 2:2]` - require.Equal(t, want, m.String()) }) - t.Run("large", func(t *testing.T) { - const mapSize = 30 + t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) - - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + const mapSize = 500 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - want := `[0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 8:8 9:9 10:10 11:11 12:12 13:13 14:14 15:15 16:16 17:17 18:18 19:19 20:20 21:21 22:22 23:23 24:24 25:25 26:26 27:27 28:28 29:29]` - require.Equal(t, want, m.String()) - }) -} + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // nested composite elements: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) -func TestMapSlabDump(t *testing.T) { + verifyMapLoadedElements(t, m, values) - SetThreshold(256) - defer SetThreshold(1024) + r := newRand(t) - t.Run("small", func(t *testing.T) { - const mapSize = 3 + // Unload composite element in random position + for len(values) > 0 { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + i := r.Intn(len(values)) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + v := values[i][1] - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + nestedArray, ok := v.(*Array) + require.True(t, ok) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + err := storage.Remove(nestedArray.SlabID()) require.NoError(t, err) - require.Nil(t, existingStorable) - } - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:55 firstkey:0 elements: [0:0:0 1:1:1 2:2:2]", + copy(values[i:], values[i+1:]) + values = values[:len(values)-1] + + verifyMapLoadedElements(t, m, values) } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) }) - t.Run("large", func(t *testing.T) { - const mapSize = 30 + t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + const mapSize = 500 + m, values := createMapWithCompositeValues( + t, + storage, + address, + typeInfo, + mapSize, + func(i int) []Digest { return []Digest{Digest(i)} }, + ) - for i := uint64(0); i < mapSize; 
i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i)}}) + // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs + // composite values: 1 root data slab for each + require.True(t, len(storage.deltas) > 1+mapSize) + require.True(t, getMapMetaDataSlabCount(storage) > 1) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + verifyMapLoadedElements(t, m, values) - want := []string{ - "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:221 firstKey:0} {id:0x102030405060708.3 size:293 firstKey:13}]", - "level 2, MapDataSlab id:0x102030405060708.2 size:221 firstkey:0 elements: [0:0:0 1:1:1 2:2:2 3:3:3 4:4:4 5:5:5 6:6:6 7:7:7 8:8:8 9:9:9 10:10:10 11:11:11 12:12:12]", - "level 2, MapDataSlab id:0x102030405060708.3 size:293 firstkey:13 elements: [13:13:13 14:14:14 15:15:15 16:16:16 17:17:17 18:18:18 19:19:19 20:20:20 21:21:21 22:22:22 23:23:23 24:24:24 25:25:25 26:26:26 27:27:27 28:28:28 29:29:29]", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) + rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - t.Run("inline collision", func(t *testing.T) { - const mapSize = 30 + type slabInfo struct { + id SlabID + startIndex int + count int + } - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + var dataSlabInfos []*slabInfo + for _, mheader := range rootMetaDataSlab.childrenHeaders { - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) + require.True(t, ok) - for i := uint64(0); i < mapSize; i++ { - k := Uint64Value(i) - v := Uint64Value(i) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 10)}}) + for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ { + h := nonRootMetaDataSlab.childrenHeaders[i] - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + if len(dataSlabInfos) > 0 { + // Update previous slabInfo.count + dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex + } - want := []string{ - "level 1, MapMetaDataSlab id:0x102030405060708.1 size:48 firstKey:0 children: [{id:0x102030405060708.2 size:213 firstKey:0} {id:0x102030405060708.3 size:221 firstKey:5}]", - "level 2, MapDataSlab id:0x102030405060708.2 size:213 firstkey:0 elements: [0:inline[:0:0 :10:10 :20:20] 1:inline[:1:1 :11:11 :21:21] 2:inline[:2:2 :12:12 :22:22] 3:inline[:3:3 :13:13 :23:23] 4:inline[:4:4 :14:14 :24:24]]", - "level 2, MapDataSlab id:0x102030405060708.3 size:221 firstkey:5 elements: [5:inline[:5:5 :15:15 :25:25] 6:inline[:6:6 :16:16 :26:26] 7:inline[:7:7 :17:17 :27:27] 8:inline[:8:8 :18:18 :28:28] 9:inline[:9:9 :19:19 :29:29]]", + dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)}) + } } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) - t.Run("external collision", func(t *testing.T) { - const mapSize = 30 + r := newRand(t) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address 
:= Address{1, 2, 3, 4, 5, 6, 7, 8}
+ for len(dataSlabInfos) > 0 {
+ index := r.Intn(len(dataSlabInfos))
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ slabToBeRemoved := dataSlabInfos[index]
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(i % 2)}})
+ // Update startIndex for all subsequent data slabs
+ for i := index + 1; i < len(dataSlabInfos); i++ {
+ dataSlabInfos[i].startIndex -= slabToBeRemoved.count
+ }
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
+ err := storage.Remove(slabToBeRemoved.id)
 require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
- want := []string{
- "level 1, MapDataSlab id:0x102030405060708.1 size:68 firstkey:0 elements: [0:external(0x102030405060708.2) 1:external(0x102030405060708.3)]",
- "collision: MapDataSlab id:0x102030405060708.2 size:135 firstkey:0 elements: [:0:0 :2:2 :4:4 :6:6 :8:8 :10:10 :12:12 :14:14 :16:16 :18:18 :20:20 :22:22 :24:24 :26:26 :28:28]",
- "collision: MapDataSlab id:0x102030405060708.3 size:135 firstkey:0 elements: [:1:1 :3:3 :5:5 :7:7 :9:9 :11:11 :13:13 :15:15 :17:17 :19:19 :21:21 :23:23 :25:25 :27:27 :29:29]",
+ if index == len(dataSlabInfos)-1 {
+ values = values[:slabToBeRemoved.startIndex]
+ } else {
+ copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:])
+ values = values[:len(values)-slabToBeRemoved.count]
+ }
+
+ copy(dataSlabInfos[index:], dataSlabInfos[index+1:])
+ dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1]
+
+ verifyMapLoadedElements(t, m, values)
 }
- dumps, err := DumpMapSlabs(m)
- require.NoError(t, err)
- require.Equal(t, want, dumps)
+
+ require.Equal(t, 0, len(values))
 })
- t.Run("key overflow", func(t *testing.T) {
+ t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) {
- digesterBuilder := &mockDigesterBuilder{}
- typeInfo := testTypeInfo{42}
 storage := newTestPersistentStorage(t)
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ const mapSize = 500
+ m, values := createMapWithCompositeValues(
+ t,
+ storage,
+ address,
+ typeInfo,
+ mapSize,
+ func(i int) []Digest { return []Digest{Digest(i)} },
+ )
- k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize)))
- v := NewStringValue(strings.Repeat("b", int(maxInlineMapKeySize)))
- digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}})
+ // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs
+ // composite values: 1 root data slab for each
+ require.True(t, len(storage.deltas) > 1+mapSize)
+ require.True(t, getMapMetaDataSlabCount(storage) > 1)
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
+ verifyMapLoadedElements(t, m, values)
- want := []string{
- "level 1, MapDataSlab id:0x102030405060708.1 size:93 firstkey:0 elements: [0:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]}):bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb]",
- "StorableSlab id:0x102030405060708.2 storable:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ type slabInfo struct {
+ id SlabID
+ startIndex int
+ count int
+ children []*slabInfo
 }
- dumps, err := DumpMapSlabs(m)
- require.NoError(t, err)
- require.Equal(t, want, dumps)
- })
- t.Run("value overflow", func(t *testing.T) {
+ 
rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) + require.True(t, ok) - digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) + for i, mheader := range rootMetaDataSlab.childrenHeaders { - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + if i > 0 { + prevMetaDataSlabInfo := metadataSlabInfos[i-1] + prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1] - k := NewStringValue(strings.Repeat("a", int(maxInlineMapKeySize-2))) - v := NewStringValue(strings.Repeat("b", int(maxInlineMapElementSize))) - digesterBuilder.On("Digest", k).Return(mockDigester{d: []Digest{Digest(0)}}) + // Update previous metadata slab count + prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + // Update previous data slab count + prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex + } - want := []string{ - "level 1, MapDataSlab id:0x102030405060708.1 size:91 firstkey:0 elements: [0:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:SlabIDStorable({[1 2 3 4 5 6 7 8] [0 0 0 0 0 0 0 2]})]", - "StorableSlab id:0x102030405060708.2 storable:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", - } - dumps, err := DumpMapSlabs(m) - require.NoError(t, err) - require.Equal(t, want, dumps) - }) -} + metadataSlabInfo := &slabInfo{ + id: mheader.slabID, + startIndex: int(mheader.firstKey), + } -func TestMaxCollisionLimitPerDigest(t *testing.T) { - savedMaxCollisionLimitPerDigest := MaxCollisionLimitPerDigest - defer func() { - MaxCollisionLimitPerDigest = savedMaxCollisionLimitPerDigest - }() + nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) + require.True(t, ok) - t.Run("collision limit 0", func(t *testing.T) { - const mapSize = 1024 + children := make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders)) + for i, h := range nonRootMetadataSlab.childrenHeaders { + children[i] = &slabInfo{ + id: h.slabID, + startIndex: int(h.firstKey), + } + if i > 0 { + children[i-1].count = int(h.firstKey) - children[i-1].startIndex + } + } - SetThreshold(256) - defer SetThreshold(1024) + metadataSlabInfo.children = children + metadataSlabInfos[i] = metadataSlabInfo + } - // Set noncryptographic hash collision limit as 0, - // meaning no collision is allowed at first level. 
- MaxCollisionLimitPerDigest = uint32(0)
+ const (
+ metadataSlabType int = iota
+ dataSlabType
+ maxSlabType
+ )
- digesterBuilder := &mockDigesterBuilder{}
- keyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- keyValues[k] = v
+ r := newRand(t)
- digests := []Digest{Digest(i)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ for len(metadataSlabInfos) > 0 {
- typeInfo := testTypeInfo{42}
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- storage := newTestPersistentStorage(t)
+ var slabInfoToBeRemoved *slabInfo
+ var isLastSlab bool
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ switch r.Intn(maxSlabType) {
- // Insert elements within collision limits
- for k, v := range keyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
+ case metadataSlabType:
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ metadataSlabIndex := r.Intn(len(metadataSlabInfos))
- // Insert elements exceeding collision limits
- collisionKeyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(mapSize + i)
- v := Uint64Value(mapSize + i)
- collisionKeyValues[k] = v
+ isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1
- digests := []Digest{Digest(i)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex]
- for k, v := range collisionKeyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.Equal(t, 1, errorCategorizationCount(err))
- var fatalError *FatalError
- var collisionLimitError *CollisionLimitError
- require.ErrorAs(t, err, &fatalError)
- require.ErrorAs(t, err, &collisionLimitError)
- require.ErrorAs(t, fatalError, &collisionLimitError)
- require.Nil(t, existingStorable)
- }
+ count := slabInfoToBeRemoved.count
- // Verify that no new elements exceeding collision limit inserted
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ // Update startIndex for subsequent metadata slabs
+ for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ {
+ metadataSlabInfos[i].startIndex -= count
- // Update elements within collision limits
- for k := range keyValues {
- v := Uint64Value(0)
- keyValues[k] = v
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.NotNil(t, existingStorable)
- }
+ for j := 0; j < len(metadataSlabInfos[i].children); j++ {
+ metadataSlabInfos[i].children[j].startIndex -= count
+ }
+ }
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
- })
+ copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:])
+ metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1]
- t.Run("collision limit > 0", func(t *testing.T) {
- const mapSize = 1024
+ case dataSlabType:
- SetThreshold(256)
- defer SetThreshold(1024)
+ metadataSlabIndex := r.Intn(len(metadataSlabInfos))
- // Set noncryptographic hash collision limit as 7,
- // meaning at most 8 elements in collision group per digest at first level. 
- MaxCollisionLimitPerDigest = uint32(7)
+ metadataSlabInfo := metadataSlabInfos[metadataSlabIndex]
- digesterBuilder := &mockDigesterBuilder{}
- keyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(i)
- v := Uint64Value(i)
- keyValues[k] = v
+ dataSlabIndex := r.Intn(len(metadataSlabInfo.children))
- digests := []Digest{Digest(i % 128)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) &&
+ (dataSlabIndex == len(metadataSlabInfo.children)-1)
- typeInfo := testTypeInfo{42}
- address := Address{1, 2, 3, 4, 5, 6, 7, 8}
- storage := newTestPersistentStorage(t)
+ slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex]
- m, err := NewMap(storage, address, digesterBuilder, typeInfo)
- require.NoError(t, err)
+ count := slabInfoToBeRemoved.count
- // Insert elements within collision limits
- for k, v := range keyValues {
- existingStorable, err := m.Set(compare, hashInputProvider, k, v)
- require.NoError(t, err)
- require.Nil(t, existingStorable)
- }
+ // Update startIndex for all subsequent data slabs in this metadata slab info
+ for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ {
+ metadataSlabInfo.children[i].startIndex -= count
+ }
- verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false)
+ copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:])
+ metadataSlabInfo.children = metadataSlabInfo.children[:len(metadataSlabInfo.children)-1]
- // Insert elements exceeding collision limits
- collisionKeyValues := make(map[Value]Value, mapSize)
- for i := uint64(0); i < mapSize; i++ {
- k := Uint64Value(mapSize + i)
- v := Uint64Value(mapSize + i)
- collisionKeyValues[k] = v
+ metadataSlabInfo.count -= count
- digests := []Digest{Digest(i % 128)}
- digesterBuilder.On("Digest", k).Return(mockDigester{digests})
- }
+ // Update startIndex for all subsequent metadata slabs. 
+ for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { + metadataSlabInfos[i].startIndex -= count - for k, v := range collisionKeyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.Equal(t, 1, errorCategorizationCount(err)) - var fatalError *FatalError - var collisionLimitError *CollisionLimitError - require.ErrorAs(t, err, &fatalError) - require.ErrorAs(t, err, &collisionLimitError) - require.ErrorAs(t, fatalError, &collisionLimitError) - require.Nil(t, existingStorable) - } + for j := 0; j < len(metadataSlabInfos[i].children); j++ { + metadataSlabInfos[i].children[j].startIndex -= count + } + } - // Verify that no new elements exceeding collision limit inserted - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + if len(metadataSlabInfo.children) == 0 { + copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) + metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] + } + } - // Update elements within collision limits - for k := range keyValues { - v := Uint64Value(0) - keyValues[k] = v - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + err := storage.Remove(slabInfoToBeRemoved.id) require.NoError(t, err) - require.NotNil(t, existingStorable) + + if isLastSlab { + values = values[:slabInfoToBeRemoved.startIndex] + } else { + copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) + values = values[:len(values)-slabInfoToBeRemoved.count] + } + + verifyMapLoadedElements(t, m, values) } - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + require.Equal(t, 0, len(values)) }) } -func TestMapLoadedValueIterator(t *testing.T) { +func createMapWithLongStringKey( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, +) (*OrderedMap, [][2]Value) { - SetThreshold(256) - defer SetThreshold(1024) + digesterBuilder := &mockDigesterBuilder{} - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} + // Create parent map. 
+ m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - t.Run("empty", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedValues := make([][2]Value, size) + r := 'a' + for i := 0; i < size; i++ { + s := strings.Repeat(string(r), int(maxInlineMapElementSize)) - digesterBuilder := &mockDigesterBuilder{} + k := NewStringValue(s) + v := Uint64Value(i) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) + expectedValues[i] = [2]Value{k, v} + + digests := []Digest{Digest(i)} + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := m.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + r++ + } - verifyMapLoadedElements(t, m, nil) - }) + return m, expectedValues +} - t.Run("root data slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func createMapWithSimpleValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - const mapSize = 3 - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + digesterBuilder := &mockDigesterBuilder{} - // parent map: 1 root data slab - require.Equal(t, 1, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) - }) + expectedValues := make([][2]Value, size) + r := rune('a') + for i := 0; i < size; i++ { + k := Uint64Value(i) + v := NewStringValue(strings.Repeat(string(r), 20)) + + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + expectedValues[i] = [2]Value{k, v} - t.Run("root data slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1]) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + return m, expectedValues +} - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) +func createMapWithCompositeValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - verifyMapLoadedElements(t, m, values) - }) + // Use mockDigesterBuilder to guarantee element order. + digesterBuilder := &mockDigesterBuilder{} - t.Run("root data slab with composite values in collision group", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Create parent map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - // Create parent map with 3 collision groups, 2 elements in each group. 
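The helpers above share one pattern: mockDigesterBuilder pins each key's digest up front, so element order in later assertions is deterministic. A hedged usage sketch for createMapWithLongStringKey; the 1+mapSize deltas count follows the accounting the replaced tests used, since every maxInlineMapElementSize-byte key spills into its own storable slab:

m, expectedValues := createMapWithLongStringKey(t, storage, address, typeInfo, 3)
require.Equal(t, uint64(3), m.Count())
require.Equal(t, 1+3, len(storage.deltas)) // 1 parent data slab + 1 storable slab per key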
- const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + expectedValues := make([][2]Value, size) + for i := 0; i < size; i++ { + // Create nested array + nested, err := NewArray(storage, address, typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for j := 0; j < 50; j++ { + err = nested.Append(Uint64Value(j)) + require.NoError(t, err) + } - verifyMapLoadedElements(t, m, values) - }) + k := Uint64Value(i) + v := nested - t.Run("root data slab with composite values in external collision group", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedValues[i] = [2]Value{k, v} - // Create parent map with 3 external collision group, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Set nested array to parent + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) - }) + return m, expectedValues +} - t.Run("root data slab with composite values, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func createMapWithSimpleAndCompositeValues( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + size int, + compositeValueIndex int, + newDigests func(i int) []Digest, +) (*OrderedMap, [][2]Value) { - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + digesterBuilder := &mockDigesterBuilder{} - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // Create parent map + m, err := NewMap(storage, address, digesterBuilder, typeInfo) + require.NoError(t, err) - verifyMapLoadedElements(t, m, values) + values := make([][2]Value, size) + r := 'a' + for i := 0; i < size; i++ { - // Unload composite element from front to back. 
- for i := 0; i < len(values); i++ { - v := values[i][1] + k := Uint64Value(i) - nestedArray, ok := v.(*Array) - require.True(t, ok) + digests := newDigests(i) + digesterBuilder.On("Digest", k).Return(mockDigester{digests}) - err := storage.Remove(nestedArray.SlabID()) + if compositeValueIndex == i { + // Create nested array with one element + a, err := NewArray(storage, address, typeInfo) require.NoError(t, err) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + for j := 0; j < 50; j++ { + err = a.Append(Uint64Value(j)) + require.NoError(t, err) + } + + values[i] = [2]Value{k, a} + } else { + values[i] = [2]Value{k, NewStringValue(strings.Repeat(string(r), 18))} } - }) - t.Run("root data slab with long string keys, unload key from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + existingStorable, err := m.Set(compare, hashInputProvider, values[i][0], values[i][1]) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + return m, values +} - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) +func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { + i := 0 + err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { + require.True(t, i < len(expectedValues)) + valueEqual(t, typeInfoComparator, expectedValues[i][0], k) + valueEqual(t, typeInfoComparator, expectedValues[i][1], v) + i++ + return true, nil + }) + require.NoError(t, err) + require.Equal(t, len(expectedValues), i) +} - verifyMapLoadedElements(t, m, values) +func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int { + var counter int + for _, slab := range storage.deltas { + if _, ok := slab.(*MapMetaDataSlab); ok { + counter++ + } + } + return counter +} - // Unload external key from front to back. - for i := 0; i < len(values); i++ { - k := values[i][0] +func TestMaxInlineMapValueSize(t *testing.T) { - s, ok := k.(StringValue) - require.True(t, ok) + t.Run("small key", func(t *testing.T) { + // Value has larger max inline size when key is less than max map key size. - // Find storage id for StringValue s. - var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } - } - } + SetThreshold(256) + defer SetThreshold(1024) - require.NoError(t, keyID.Valid()) + mapSize := 2 + keyStringSize := 16 // Key size is less than max map key size. + valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size. - err := storage.Remove(keyID) - require.NoError(t, err) + r := newRand(t) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v } - }) - t.Run("root data slab with composite values in collision group, unload value from front to back", func(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - // Create parent map with 3 collision groups, 2 elements in each group. 
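The newDigests callback is what shapes collision layout in these helpers: returning Digest(i/2) as the first-level digest, for example, groups every two consecutive keys into one collision group. This exact call appears in the tests being replaced here:

m, values := createMapWithCompositeValues(
	t, storage, address, typeInfo, 6,
	func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, // 3 collision groups of 2
)
verifyMapLoadedElements(t, m, values)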
- const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) + // Both key and value are stored in map slab. + require.Equal(t, 1, len(storage.deltas)) - // Unload composite element from front to back. - for i := 0; i < len(values); i++ { - v := values[i][1] + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) + + t.Run("max size key", func(t *testing.T) { + // Value max size is about half of max map element size when key is exactly max map key size. + + SetThreshold(256) + defer SetThreshold(1024) - nestedArray, ok := v.(*Array) - require.True(t, ok) + mapSize := 1 + keyStringSize := maxInlineMapKeySize - 2 // Key size is exactly max map key size (2 bytes is string encoding overhead). + valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half). - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + r := newRand(t) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v } - }) - t.Run("root data slab with composite values in external collision group, unload value from front to back", func(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) + // Key is stored in map slab, while value is stored separately in storable slab. 
+ require.Equal(t, 2, len(storage.deltas)) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i][1] + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) - nestedArray, ok := v.(*Array) - require.True(t, ok) + t.Run("large key", func(t *testing.T) { + // Value has larger max inline size when key is more than max map key size because + // when key size exceeds max map key size, it is stored in a separate storable slab, + // and SlabIDStorable is stored as key in the map, which is 19 bytes. - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + SetThreshold(256) + defer SetThreshold(1024) - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + mapSize := 1 + keyStringSize := maxInlineMapKeySize + 10 // key size is more than max map key size + valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size + + r := newRand(t) + + keyValues := make(map[Value]Value, mapSize) + for len(keyValues) < mapSize { + k := NewStringValue(randStr(r, int(keyStringSize))) + v := NewStringValue(randStr(r, int(valueStringSize))) + keyValues[k] = v } - }) - t.Run("root data slab with composite values in external collision group, unload external slab from front to back", func(t *testing.T) { + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} storage := newTestPersistentStorage(t) - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - verifyMapLoadedElements(t, m, values) + // Key is stored in separate storable slabs, while value is stored in map slab. 
+ require.Equal(t, 2, len(storage.deltas)) - // Unload external collision group slab from front to back + verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + }) +} - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } - } - } - require.Equal(t, 3, len(externalCollisionSlabIDs)) +func TestMapID(t *testing.T) { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() - } - return a.AddressAsUint64() < b.AddressAsUint64() - }) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - for i, id := range externalCollisionSlabIDs { - err := storage.Remove(id) - require.NoError(t, err) + sid := m.SlabID() + id := m.ValueID() - expectedValues := values[i*4+4:] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + require.Equal(t, sid.address[:], id[:8]) + require.Equal(t, sid.index[:], id[8:]) +} - t.Run("root data slab with composite values, unload composite value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { + const ( + mapSize = 3 + keyStringSize = 16 + initialStorableSize = 1 + mutatedStorableSize = 5 + ) - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + keyValues := make(map[Value]*testMutableValue, mapSize) + for i := 0; i < mapSize; i++ { + k := Uint64Value(i) + v := newTestMutableValue(initialStorableSize) + keyValues[k] = v + } - // parent map: 1 root data slab - // composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + typeInfo := testTypeInfo{42} + address := Address{1, 2, 3, 4, 5, 6, 7, 8} + storage := newTestPersistentStorage(t) - verifyMapLoadedElements(t, m, values) + m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - // Unload composite element from back to front. 
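TestMapID above pins down the ID layout: a ValueID is the 8-byte slab address followed by the 8-byte slab index, which is why a standalone map's SlabID and ValueID are bytewise identical. A hedged helper restating the relationship the assertions check (not part of the API, and assuming ValueID's 16-byte array form):

func valueIDFromSlabID(sid SlabID) (id ValueID) {
	copy(id[:8], sid.address[:])
	copy(id[8:], sid.index[:])
	return id
}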
- for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + for k, v := range keyValues { + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + } - nestedArray, ok := v.(*Array) - require.True(t, ok) + require.True(t, m.root.IsData()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedElementSize := singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + initialStorableSize + expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize + require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) - t.Run("root data slab with long string key, unload key from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Reset mutable values after changing its storable size + for k, v := range keyValues { + v.updateStorableSize(mutatedStorableSize) - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.NotNil(t, existingStorable) + } - // parent map: 1 root data slab - // long string keys: 1 storable slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.True(t, m.root.IsData()) - verifyMapLoadedElements(t, m, values) + expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize + expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize + require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) - // Unload composite element from front to back. - for i := len(values) - 1; i >= 0; i-- { - k := values[i][0] + err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + require.NoError(t, err) +} - s, ok := k.(StringValue) - require.True(t, ok) +func TestChildMapInlinabilityInParentMap(t *testing.T) { - // Find storage id for StringValue s. - var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } - } - } + SetThreshold(256) + defer SetThreshold(1024) - require.NoError(t, keyID.Valid()) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - err := storage.Remove(keyID) - require.NoError(t, err) + t.Run("parent is root data slab, with one child map", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 + ) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - t.Run("root data slab with composite values in collision group, unload value from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + r := newRand(t) - // Create parent map with 3 collision groups, 2 elements in each group. 
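Every size assertion in the test below derives from one model: an inlined child map costs a fixed prefix plus a per-element cost, and that whole byte count is embedded as the value portion of the parent's element. A hedged restatement using the package-level size constants the assertions reference:

func expectedInlinedChildMapSize(count, encodedKeySize, encodedValueSize uint32) uint32 {
	perElement := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
	return inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + perElement*count
}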
-		const mapSize = 6
-		m, values := createMapWithCompositeValues(
-			t,
-			storage,
-			address,
-			typeInfo,
-			mapSize,
-			func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} },
-		)
+		typeInfo := testTypeInfo{42}
+		storage := newTestPersistentStorage(t)
+		address := Address{1, 2, 3, 4, 5, 6, 7, 8}

-		// parent map: 1 root data slab
-		// composite elements: 1 root data slab for each
-		require.Equal(t, 1+mapSize, len(storage.deltas))
-		require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+		parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value {
+			return NewStringValue(randStr(r, keyStringSize))
+		})

-		verifyMapLoadedElements(t, m, values)
+		require.Equal(t, uint64(mapSize), parentMap.Count())
+		require.True(t, parentMap.root.IsData())
+		require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined.

-		// Unload composite element from back to front
-		for i := len(values) - 1; i >= 0; i-- {
-			v := values[i][1]
+		verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)

-			nestedArray, ok := v.(*Array)
-			require.True(t, ok)
+		children := getInlinedChildMapsFromParentMap(t, address, parentMap)

-			err := storage.Remove(nestedArray.SlabID())
-			require.NoError(t, err)
+		// Append 3 elements to each child map so that the inlined child map reaches max inlined size as a map element.
+		for i := 0; i < 3; i++ {
+			for _, child := range children {
+				childMap := child.m
+				valueID := child.valueID

-			expectedValues := values[:i]
-			verifyMapLoadedElements(t, m, expectedValues)
+				k := NewStringValue(randStr(r, keyStringSize))
+				v := NewStringValue(randStr(r, valueStringSize))
+
+				child.keys = append(child.keys, k)
+
+				existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+				require.NoError(t, err)
+				require.Nil(t, existingStorable)
+				require.Equal(t, uint64(i+1), childMap.Count())
+
+				require.True(t, childMap.Inlined())
+				require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, valueID, childMap.ValueID())        // Value ID is unchanged
+				require.Equal(t, 1, getStoredDeltas(storage))
+
+				// Test inlined child slab size
+				expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+				expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+					expectedChildElementSize*uint32(childMap.Count())
+				require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())
+
+				// Test parent slab size
+				expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize
+				expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) +
+					expectedParentElementSize*mapSize
+				require.Equal(t, expectedParentSize, parentMap.root.ByteSize())
+
+				verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+			}
+		}
-	})

-	t.Run("root data slab with composite values in external collision group, unload value from back to front", func(t *testing.T) {
-		storage := newTestPersistentStorage(t)
+		// Add one more element to each child map, which triggers the inlined child map slab to become a standalone slab
+		for i, child := range children {
+			childMap := child.m
+			valueID := child.valueID

-		// Create parent map with 3 external collision groups, 4 elements in the group.
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + child.keys = append(child.keys, k) - verifyMapLoadedElements(t, m, values) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + require.False(t, childMap.Inlined()) + require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(expectedSlabID).ByteSize() + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedParentElementSize*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root data slab with composite values in external collision group, unload external slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Remove elements from child map which triggers standalone map slab becomes inlined slab again. + for _, child := range children { + childMap := child.m + valueID := child.valueID + keys := child.keys - // Create parent map with 3 external collision groups, 4 elements in the group. 
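When a child crosses the parent's inline limit, the parent's element swaps the inlined child bytes for a SlabIDStorable reference; removing elements reverses the swap. The net parent-size change tracked by the assertions above, as a hedged sketch:

// parentSizeAfterUninline: the parent loses the inlined child's bytes and
// gains a slab ID reference (SlabIDStorable(...).ByteSize()); re-inlining
// applies the same delta with the signs flipped.
func parentSizeAfterUninline(parentSize, inlinedChildSize, refSize uint32) uint32 {
	return parentSize - inlinedChildSize + refSize
}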
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + for _, k := range keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - // parent map: 1 root data slab, 3 external collision group - // composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - verifyMapLoadedElements(t, m, values) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - // Unload external slabs from back to front - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } - } - } - require.Equal(t, 3, len(externalCollisionSlabIDs)) + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedParentElementSize*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - return a.AddressAsUint64() < b.AddressAsUint64() - }) - - for i := len(externalCollisionSlabIDs) - 1; i >= 0; i-- { - err := storage.Remove(externalCollisionSlabIDs[i]) - require.NoError(t, err) - - expectedValues := values[:i*4] - verifyMapLoadedElements(t, m, expectedValues) } - }) - t.Run("root data slab with composite values, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. + }) - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, + t.Run("parent is root data slab, with two child maps", func(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 9 + valueStringSize = 4 ) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - verifyMapLoadedElements(t, m, values) + r := newRand(t) - // Unload value in the middle - unloadValueIndex := 1 + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - v := values[unloadValueIndex][1] + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - nestedArray, ok := v.(*Array) - require.True(t, ok) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + children := getInlinedChildMapsFromParentMap(t, address, parentMap) - verifyMapLoadedElements(t, m, values) - }) + expectedParentSize := parentMap.root.ByteSize() - t.Run("root data slab with long string key, unload key in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. + for i := 0; i < 3; i++ { + for _, child := range children { + childMap := child.m + valueID := child.valueID - const mapSize = 3 - m, values := createMapWithLongStringKey(t, storage, address, typeInfo, mapSize) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + child.keys = append(child.keys, k) - verifyMapLoadedElements(t, m, values) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + require.Equal(t, uint64(i+1), childMap.Count()) - // Unload key in the middle. - unloadValueIndex := 1 + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged + require.Equal(t, 1, getStoredDeltas(storage)) - k := values[unloadValueIndex][0] + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - s, ok := k.(StringValue) - require.True(t, ok) + // Test parent slab size + expectedParentSize += expectedChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // Find storage id for StringValue s. 
- var keyID SlabID - for id, slab := range storage.deltas { - if sslab, ok := slab.(*StorableSlab); ok { - if other, ok := sslab.storable.(StringValue); ok { - if s.str == other.str { - keyID = id - break - } - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } - require.NoError(t, keyID.Valid()) - - err := storage.Remove(keyID) - require.NoError(t, err) - - copy(values[unloadValueIndex:], values[unloadValueIndex+1:]) - values = values[:len(values)-1] + // Add one more element to child array which triggers inlined child array slab becomes standalone slab + for i, child := range children { + childMap := child.m + valueID := child.valueID - verifyMapLoadedElements(t, m, values) - }) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - t.Run("root data slab with composite values in collision group, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + child.keys = append(child.keys, k) - // Create parent map with 3 collision groups, 2 elements in each group. - const mapSize = 6 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 2), Digest(i)} }, - ) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.False(t, childMap.Inlined()) + require.Equal(t, 1+1+i, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. - verifyMapLoadedElements(t, m, values) + expectedSlabID := valueIDToSlabID(valueID) + require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID. + require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged - // Unload composite element in the middle - for _, unloadValueIndex := range []int{1, 3, 5} { - v := values[unloadValueIndex][1] + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Subtract inlined child map size from expected parent size + expectedParentSize -= uint32(inlinedMapDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()-1) + // Add slab id storable size to expected parent size + expectedParentSize += SlabIDStorable(expectedSlabID).ByteSize() + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - expectedValues := [][2]Value{ - values[0], - values[2], - values[4], - } - verifyMapLoadedElements(t, m, expectedValues) - }) + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. 
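The SlabIDStorable(...).ByteSize() term in the bookkeeping above is the 19 bytes cited in TestMaxInlineMapValueSize. A hedged breakdown (the CBOR envelope split is an assumption; the 19-byte total is from this file's comments):

const slabIDStorableByteSize = 2 + 1 + 8 + 8 // tag number + byte string head + address + index = 19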
- t.Run("root data slab with composite values in external collision group, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Remove one element from each child map which triggers standalone map slab becomes inlined slab again. + for i, child := range children { + childMap := child.m + valueID := child.valueID + keys := child.keys - // Create parent map with 3 external collision groups, 4 elements in the group. - const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + lastKey := keys[len(keys)-1] + child.keys = child.keys[:len(keys)-1] - // parent map: 1 root data slab, 3 external collision group - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, lastKey) + require.NoError(t, err) + require.Equal(t, lastKey, existingKey) + require.NotNil(t, existingValue) - verifyMapLoadedElements(t, m, values) + require.Equal(t, 1+mapSize-1-i, getStoredDeltas(storage)) - // Unload composite value in the middle. - for _, unloadValueIndex := range []int{1, 3, 5, 7, 9, 11} { - v := values[unloadValueIndex][1] + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } + // Subtract slab id storable size from expected parent size + expectedParentSize -= SlabIDStorable(SlabID{}).ByteSize() + // Add expected inlined child map to expected parent size + expectedParentSize += expectedInlinedMapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - expectedValues := [][2]Value{ - values[0], - values[2], - values[4], - values[6], - values[8], - values[10], + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - verifyMapLoadedElements(t, m, expectedValues) - }) - t.Run("root data slab with composite values in external collision group, unload external slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Remove remaining elements from each inlined child map. + for _, child := range children { + childMap := child.m + valueID := child.valueID + keys := child.keys - // Create parent map with 3 external collision groups, 4 elements in the group. 
- const mapSize = 12 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i / 4), Digest(i)} }, - ) + for _, k := range keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - // parent map: 1 root data slab, 3 external collision group - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+3+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.Equal(t, 1, getStoredDeltas(storage)) + + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged + + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + expectedParentSize -= expectedChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // Unload external slabs in the middle. - var externalCollisionSlabIDs []SlabID - for id, slab := range storage.deltas { - if dataSlab, ok := slab.(*MapDataSlab); ok { - if dataSlab.collisionGroup { - externalCollisionSlabIDs = append(externalCollisionSlabIDs, id) - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } } - require.Equal(t, 3, len(externalCollisionSlabIDs)) - sort.Slice(externalCollisionSlabIDs, func(i, j int) bool { - a := externalCollisionSlabIDs[i] - b := externalCollisionSlabIDs[j] - if a.address == b.address { - return a.IndexAsUint64() < b.IndexAsUint64() - } - return a.AddressAsUint64() < b.AddressAsUint64() - }) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. + }) - id := externalCollisionSlabIDs[1] - err := storage.Remove(id) - require.NoError(t, err) + t.Run("parent is root metadata slab, with four child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 9 + valueStringSize = 4 + ) - copy(values[4:], values[8:]) - values = values[:8] + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - verifyMapLoadedElements(t, m, values) - }) + r := newRand(t) - t.Run("root data slab with composite values, unload composite elements during iteration", func(t *testing.T) { + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const mapSize = 3 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + parentMap, expectedKeyValues := createMapWithEmptyChildMap(t, storage, address, typeInfo, mapSize, func() Value { + return NewStringValue(randStr(r, keyStringSize)) + }) - // parent map: 1 root data slab - // nested composite elements: 1 root data slab for each - require.Equal(t, 1+mapSize, len(storage.deltas)) - require.Equal(t, 0, getMapMetaDataSlabCount(storage)) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMapLoadedElements(t, m, values) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { - // At this point, iterator returned first element (v). + children := getInlinedChildMapsFromParentMap(t, address, parentMap) - // Remove all other nested composite elements (except first element) from storage. - for _, element := range values[1:] { - value := element[1] - nestedArray, ok := value.(*Array) - require.True(t, ok) + // Appending 3 elements to child map so that inlined child map reaches max inlined size as map element. + for i := 0; i < 3; i++ { + for _, child := range children { + childMap := child.m + valueID := child.valueID - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) - } + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - require.Equal(t, 0, i) - valueEqual(t, typeInfoComparator, values[0][0], k) - valueEqual(t, typeInfoComparator, values[0][1], v) - i++ - return true, nil - }) + child.keys = append(child.keys, k) - require.NoError(t, err) - require.Equal(t, 1, i) // Only first element is iterated because other elements are remove during iteration. 
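With four inlined children the parent itself outgrows a single data slab, so the stored-delta counts checked below are the root metadata slab plus its data slabs, while inlined children contribute no slabs of their own. A hypothetical companion to getMapMetaDataSlabCount for inspecting that split:

func getMapDataSlabCount(storage *PersistentSlabStorage) int {
	var counter int
	for _, slab := range storage.deltas {
		if _, ok := slab.(*MapDataSlab); ok {
			counter++
		}
	}
	return counter
}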
-	})
+				existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
+				require.NoError(t, err)
+				require.Nil(t, existingStorable)
+				require.Equal(t, uint64(i+1), childMap.Count())

-	t.Run("root data slab with simple and composite values, unloading composite value", func(t *testing.T) {
-		const mapSize = 3
+				require.True(t, childMap.Inlined())
+				require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab
+				require.Equal(t, valueID, childMap.ValueID())        // Value ID is unchanged

-		// Create a map with nested composite value at specified index
-		for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ {
-			storage := newTestPersistentStorage(t)
+				// Test inlined child slab size
+				expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
+				expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize +
+					expectedChildElementSize*uint32(childMap.Count())
+				require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize())

-			m, values := createMapWithSimpleAndCompositeValues(
-				t,
-				storage,
-				address,
-				typeInfo,
-				mapSize,
-				nestedCompositeIndex,
-				func(i int) []Digest { return []Digest{Digest(i)} },
-			)
+				verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true)
+			}
+		}

-			// parent map: 1 root data slab
-			// composite element: 1 root data slab
-			require.Equal(t, 2, len(storage.deltas))
-			require.Equal(t, 0, getMapMetaDataSlabCount(storage))
+		// Parent map has 1 metadata slab and 2 data slabs.
+		// All child maps are inlined.
+		require.Equal(t, 3, getStoredDeltas(storage))
+		require.False(t, parentMap.root.IsData())

-			verifyMapLoadedElements(t, m, values)
+		// Add one more element to each child map, which triggers the inlined child map slab to become a standalone slab
+		for _, child := range children {
+			childMap := child.m
+			valueID := child.valueID

-			// Unload composite value
-			v := values[nestedCompositeIndex][1].(*Array)
+			k := NewStringValue(randStr(r, keyStringSize))
+			v := NewStringValue(randStr(r, valueStringSize))

-			err := storage.Remove(v.SlabID())
+			child.keys = append(child.keys, k)
+
+			existingStorable, err := childMap.Set(compare, hashInputProvider, k, v)
 			require.NoError(t, err)
+			require.Nil(t, existingStorable)

-			copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:])
-			values = values[:len(values)-1]
+			require.False(t, childMap.Inlined())

-			verifyMapLoadedElements(t, m, values)
-		}
-	})
+			expectedSlabID := valueIDToSlabID(valueID)
+			require.Equal(t, expectedSlabID, childMap.SlabID()) // Storage ID is the same bytewise as value ID.
+ require.Equal(t, valueID, childMap.ValueID()) // Value ID is unchanged - t.Run("root metadata slab with simple values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedStandaloneSlabSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedStandaloneSlabSize, childMap.root.ByteSize()) - const mapSize = 20 - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Parent map has one root data slab. + // Each child maps has one root data slab. + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There are >1 stored slab because child map is no longer inlined. + require.True(t, parentMap.root.IsData()) - verifyMapLoadedElements(t, m, values) - }) + // Remove one element from each child map which triggers standalone map slab becomes inlined slab again. + for _, child := range children { + childMap := child.m + valueID := child.valueID + keys := child.keys - t.Run("root metadata slab with composite values", func(t *testing.T) { - storage := newTestPersistentStorage(t) + lastKey := keys[len(keys)-1] + child.keys = child.keys[:len(keys)-1] - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, lastKey) + require.NoError(t, err) + require.Equal(t, lastKey, existingKey) + require.NotNil(t, existingValue) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - verifyMapLoadedElements(t, m, values) - }) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - t.Run("root metadata slab with composite values, unload value from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Parent map has one metadata slab + 2 data slabs. + require.Equal(t, 3, getStoredDeltas(storage)) // There are 3 stored slab because child map is inlined again. 
+ require.False(t, parentMap.root.IsData()) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values : 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Remove remaining elements from each inlined child map. + for _, child := range children { + childMap := child.m + valueID := child.valueID + keys := child.keys - verifyMapLoadedElements(t, m, values) + for _, k := range keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - // Unload composite element from front to back - for i := 0; i < len(values); i++ { - v := values[i][1] + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, valueID, childMap.ValueID()) // value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedInlinedMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedInlinedMapSize, childMap.root.ByteSize()) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } - expectedValues := values[i+1:] - verifyMapLoadedElements(t, m, expectedValues) + require.Equal(t, uint64(mapSize), parentMap.Count()) + for _, child := range children { + require.Equal(t, uint64(0), child.m.Count()) } + + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(mapSize) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) }) +} - t.Run("root metadata slab with composite values, unload values from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) +func TestNestedThreeLevelChildMapInlinabilityInParentMap(t *testing.T) { - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, + SetThreshold(256) + defer SetThreshold(1024) + + t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers child map slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 ) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() - verifyMapLoadedElements(t, m, values) + r := newRand(t) - // Unload composite element from back to front - for i := len(values) - 1; i >= 0; i-- { - v := values[i][1] + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - nestedArray, ok := v.(*Array) - require.True(t, ok) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Create a parent map, with an inlined child map, with an inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - expectedValues := values[:i] - verifyMapLoadedElements(t, m, expectedValues) - } - }) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - t.Run("root metadata slab with composite values, unload value in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - const mapSize = 20 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 4+mapSize, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + expectedParentSize := parentMap.root.ByteSize() - verifyMapLoadedElements(t, m, values) + // Inserting 1 elements to grand child map so that inlined grand child map reaches max inlined size as map element. 
+ for _, child := range children { + require.Equal(t, 1, len(child.children)) - // Unload composite element in the middle - for _, index := range []int{4, 14} { + childMap := child.m + cValueID := child.valueID - v := values[index][1] + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - nestedArray, ok := v.(*Array) - require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - err := storage.Remove(nestedArray.SlabID()) + gchild.keys = append(gchild.keys, k) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - copy(values[index:], values[index+1:]) - values = values[:len(values)-1] + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) - } - }) + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - t.Run("root metadata slab with simple and composite values, unload composite value", func(t *testing.T) { - const mapSize = 20 + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - // Create a map with nested composite value at specified index - for nestedCompositeIndex := 0; nestedCompositeIndex < mapSize; nestedCompositeIndex++ { - storage := newTestPersistentStorage(t) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - m, values := createMapWithSimpleAndCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - nestedCompositeIndex, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - // composite values: 1 root data slab for each - require.Equal(t, 5, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
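The size assertions in these subtests all reduce to one formula: slab prefix, plus hkey-elements prefix, plus a fixed per-element overhead times the element count. A minimal sketch of that arithmetic, using hypothetical stand-in values for the prefix constants defined in map.go (the real values may differ):

package main

import "fmt"

// Hypothetical stand-ins for the real constants in map.go; the exact
// values here are assumptions for illustration only.
const (
	inlinedMapDataSlabPrefixSize = 17
	hkeyElementsPrefixSize       = 8
	singleElementPrefixSize      = 1
	digestSize                   = 8
)

// expectedInlinedMapSize mirrors the arithmetic the assertions repeat:
// prefix + hkey prefix + (element prefix + digest + key + value) per element.
func expectedInlinedMapSize(encodedKeySize, encodedValueSize, count uint32) uint32 {
	elemSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
	return inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + elemSize*count
}

func main() {
	// e.g. one element with an 11-byte encoded key and a 6-byte encoded value
	fmt.Println(expectedInlinedMapSize(11, 6, 1))
}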
+ + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Add one more element to the grand child map, which triggers the inlined child map slab (NOT the grand child map slab) to become a standalone slab + for _, child := range children { + require.Equal(t, 1, len(child.children)) - verifyMapLoadedElements(t, m, values) + childMap := child.m + cValueID := child.valueID - v := values[nestedCompositeIndex][1].(*Array) + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - err := storage.Remove(v.SlabID()) - require.NoError(t, err) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - copy(values[nestedCompositeIndex:], values[nestedCompositeIndex+1:]) - values = values[:len(values)-1] + gchild.keys = append(gchild.keys, k) - verifyMapLoadedElements(t, m, values) - } - }) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - t.Run("root metadata slab, unload data slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - const mapSize = 20 + // Child map is NOT inlined + require.False(t, childMap.Inlined()) + require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is derived from value ID for standalone slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + // Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + // Test parent slab size + expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + singleElementPrefixSize + digestSize + encodedKeySize + SlabIDStorable(SlabID{}).ByteSize() + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - // Unload data slabs from front to back - for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - childHeader := rootMetaDataSlab.childrenHeaders[i] + require.True(t, parentMap.root.IsData()) + require.Equal(t, 2, getStoredDeltas(storage)) // There are 2 stored slabs because the child map is not inlined.
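The assertions around each inlined/standalone transition check a single invariant on three accessors. A condensed sketch, written as a hypothetical helper that could live in this test file (requireInlinedState is not part of the change; it assumes the file's existing imports):

// Invariant sketch (names from this file; not a new API):
//   inlined:    Inlined() == true,  SlabID() == SlabIDUndefined
//   standalone: Inlined() == false, SlabID() == valueIDToSlabID(ValueID())
//   either way: ValueID() never changes once the child is created.
func requireInlinedState(t *testing.T, m *OrderedMap, inlined bool, vid ValueID) {
	require.Equal(t, inlined, m.Inlined())
	require.Equal(t, vid, m.ValueID()) // value ID is stable across transitions
	if inlined {
		require.Equal(t, SlabIDUndefined, m.SlabID())
	} else {
		require.Equal(t, valueIDToSlabID(vid), m.SlabID())
	}
}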
- // Get data slab element count before unload it from storage. - // Element count isn't in the header. - mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) - require.True(t, ok) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - count := mapDataSlab.elements.Count() + // Remove elements from the grand child map, which triggers the standalone child map slab to become an inlined slab again. + for _, child := range children { + childMap := child.m + cValueID := child.valueID - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - values = values[count:] + for _, k := range gchild.keys { + existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - verifyMapLoadedElements(t, m, values) + require.Equal(t, uint64(0), gchildMap.Count()) + require.Equal(t, uint64(1), childMap.Count()) } + + require.Equal(t, uint64(1), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) }) - t.Run("root metadata slab, unload data slab from back to front", func(t *testing.T) { + t.Run("parent is root data slab, one child map, one grand child map, changes to grand child map triggers grand child map slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 1 + keyStringSize = 9 + valueStringSize = 4 + largeValueStringSize = 40 + ) + + // encoded key size is the same for all string keys of the same length. 
+ encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + encodedLargeValueSize := NewStringValue(strings.Repeat("a", largeValueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() + + r := newRand(t) + + typeInfo := testTypeInfo{42} storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - const mapSize = 20 + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Create a parent map with an inlined child map, which holds an inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMapLoadedElements(t, m, values) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - // Unload data slabs from back to front - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + expectedParentSize := parentMap.root.ByteSize() - childHeader := rootMetaDataSlab.childrenHeaders[i] + // Insert 1 element into the grand child map so that the inlined grand child map reaches its max inlined size as a map element. + for _, child := range children { + require.Equal(t, 1, len(child.children)) - // Get data slab element count before unload it from storage - // Element count isn't in the header. 
- mapDataSlab, ok := storage.deltas[childHeader.slabID].(*MapDataSlab) - require.True(t, ok) + childMap := child.m + cValueID := child.valueID - count := mapDataSlab.elements.Count() + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - err := storage.Remove(childHeader.slabID) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) + + gchild.keys = append(gchild.keys, k) + + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - values = values[:len(values)-int(count)] + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) - } - }) + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - t.Run("root metadata slab, unload data slab in the middle", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - const mapSize = 20 + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - // parent map (2 levels): 1 root metadata slab, 3 data slabs - require.Equal(t, 4, len(storage.deltas)) - require.Equal(t, 1, getMapMetaDataSlabCount(storage)) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
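valueIDToSlabID is the bridge between the two identities asserted above. A self-contained sketch of that conversion, assuming a ValueID encodes the 8-byte slab address followed by the 8-byte slab index (the local types below are illustrative stand-ins, not the library's own):

package main

import (
	"encoding/binary"
	"fmt"
)

type Address [8]byte
type SlabIndex [8]byte
type ValueID [16]byte

type SlabID struct {
	address Address
	index   SlabIndex
}

// valueIDToSlabID mirrors the conversion the tests rely on: the slab ID of a
// promoted (standalone) child reuses the address and index baked into its value ID.
func valueIDToSlabID(vid ValueID) SlabID {
	var sid SlabID
	copy(sid.address[:], vid[:8])
	copy(sid.index[:], vid[8:])
	return sid
}

func main() {
	var vid ValueID
	copy(vid[:8], []byte{1, 2, 3, 4, 5, 6, 7, 8})
	binary.BigEndian.PutUint64(vid[8:], 42)
	fmt.Printf("%+v\n", valueIDToSlabID(vid))
}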
- require.True(t, len(rootMetaDataSlab.childrenHeaders) > 2) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - index := 1 - childHeader := rootMetaDataSlab.childrenHeaders[index] + // Add one large element to grand child map which triggers inlined grand child map slab (NOT child map slab) becomes standalone slab + for _, child := range children { + require.Equal(t, 1, len(child.children)) - // Get element count from previous data slab - mapDataSlab, ok := storage.deltas[rootMetaDataSlab.childrenHeaders[0].slabID].(*MapDataSlab) - require.True(t, ok) + childMap := child.m + cValueID := child.valueID - countAtIndex0 := mapDataSlab.elements.Count() + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - // Get element count from slab to be unloaded - mapDataSlab, ok = storage.deltas[rootMetaDataSlab.childrenHeaders[index].slabID].(*MapDataSlab) - require.True(t, ok) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, largeValueStringSize)) - countAtIndex1 := mapDataSlab.elements.Count() + gchild.keys = append(gchild.keys, k) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - copy(values[countAtIndex0:], values[countAtIndex0+countAtIndex1:]) - values = values[:m.Count()-uint64(countAtIndex1)] + // Grand child map is NOT inlined + require.False(t, gchildMap.Inlined()) + require.Equal(t, valueIDToSlabID(gValueID), gchildMap.SlabID()) // Slab ID is valid for not inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) - }) + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - t.Run("root metadata slab, unload non-root metadata slab from front to back", func(t *testing.T) { - storage := newTestPersistentStorage(t) + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 2, getStoredDeltas(storage)) - const mapSize = 200 + // Test standalone grand child slab size + expectedGrandChildElement1Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildElement2Size := singleElementPrefixSize + digestSize + encodedKeySize + encodedLargeValueSize + expectedGrandChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElement1Size + expectedGrandChildElement2Size + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + expectedChildElementSize + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs - require.Equal(t, 4, getMapMetaDataSlabCount(storage)) + // Test parent slab size + expectedParentSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + singleElementPrefixSize + digestSize + encodedKeySize + 
expectedChildMapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // Unload non-root metadata slabs from front to back. - for i := 0; i < len(rootMetaDataSlab.childrenHeaders); i++ { + require.True(t, parentMap.root.IsData()) + require.Equal(t, 2, getStoredDeltas(storage)) // There is 2 stored slab because child map is not inlined. - childHeader := rootMetaDataSlab.childrenHeaders[i] + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + // Remove elements from grand child map which triggers standalone child map slab becomes inlined slab again. + for _, child := range children { + childMap := child.m + cValueID := child.valueID - // Use firstKey to deduce number of elements in slab. - var expectedValues [][2]Value - if i < len(rootMetaDataSlab.childrenHeaders)-1 { - nextChildHeader := rootMetaDataSlab.childrenHeaders[i+1] - expectedValues = values[int(nextChildHeader.firstKey):] + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID + + // Remove the last element (large element) first to trigger grand child map being inlined again. + for i := len(gchild.keys) - 1; i >= 0; i-- { + k := gchild.keys[i] + + existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedChildMapSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - verifyMapLoadedElements(t, m, expectedValues) + require.Equal(t, uint64(0), gchildMap.Count()) + require.Equal(t, uint64(1), childMap.Count()) } - }) - t.Run("root metadata slab, unload non-root metadata slab from back to front", func(t *testing.T) { - storage := newTestPersistentStorage(t) + require.Equal(t, uint64(1), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + 
require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - const mapSize = 200 + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) - m, values := createMapWithSimpleValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, + t.Run("parent is root data slab, two child maps, one grand child map each, changes to child map triggers child map slab to become standalone slab", func(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 4 + valueStringSize = 4 ) - // parent map (3 levels): 1 root metadata slab, 3 child metadata slabs, n data slabs - require.Equal(t, 4, getMapMetaDataSlabCount(storage)) + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + r := newRand(t) - // Unload non-root metadata slabs from back to front. - for i := len(rootMetaDataSlab.childrenHeaders) - 1; i >= 0; i-- { + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - childHeader := rootMetaDataSlab.childrenHeaders[i] + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - err := storage.Remove(childHeader.slabID) - require.NoError(t, err) + // Create a parent map, with inlined child map, containing inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - // Use firstKey to deduce number of elements in slabs. - values = values[:childHeader.firstKey] + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - verifyMapLoadedElements(t, m, values) - } - }) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - t.Run("root metadata slab with composite values, unload composite value at random index", func(t *testing.T) { + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - storage := newTestPersistentStorage(t) + expectedParentSize := parentMap.root.ByteSize() - const mapSize = 500 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Insert 1 element into the grand child map (both the child map and the grand child map are still inlined). 
+ for _, child := range children { + childMap := child.m + cValueID := child.valueID - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // nested composite elements: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - verifyMapLoadedElements(t, m, values) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - r := newRand(t) + gchild.keys = append(gchild.keys, k) - // Unload composite element in random position - for len(values) > 0 { + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - i := r.Intn(len(values)) + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - v := values[i][1] + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - nestedArray, ok := v.(*Array) - require.True(t, ok) + // Only parent map slab is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) - err := storage.Remove(nestedArray.SlabID()) - require.NoError(t, err) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - copy(values[i:], values[i+1:]) - values = values[:len(values)-1] + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + // Test parent slab size + expectedParentSize += expectedGrandChildElementSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - }) - t.Run("root metadata slab with composite values, unload random data slab", func(t *testing.T) { + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. 
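When a child flips between inlined and standalone, the tests adjust the expected parent size by swapping the child's inline encoding for a fixed-size SlabID reference, and back again (see expectedParentMapSize - slabIDStorableSize + expectedChildMapSize below). A minimal sketch of that bookkeeping, with illustrative function names that are not part of the change:

// Child becomes standalone: its inline encoding in the parent is replaced
// by a SlabID reference of fixed size.
func parentSizeAfterUninline(parentSize, inlinedChildSize, slabIDRefSize uint32) uint32 {
	return parentSize - inlinedChildSize + slabIDRefSize
}

// Child becomes inlined again: the SlabID reference is replaced by the
// child's inline encoding.
func parentSizeAfterInline(parentSize, slabIDRefSize, inlinedChildSize uint32) uint32 {
	return parentSize - slabIDRefSize + inlinedChildSize
}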
- storage := newTestPersistentStorage(t) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - const mapSize = 500 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + expectedParentSize = parentMap.root.ByteSize() - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // composite values: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + // Add 1 element to each child map so child map reaches its max size + for _, child := range children { - verifyMapLoadedElements(t, m, values) + childMap := child.m + cValueID := child.valueID - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - type slabInfo struct { - id SlabID - startIndex int - count int - } + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - var dataSlabInfos []*slabInfo - for _, mheader := range rootMetaDataSlab.childrenHeaders { + child.keys = append(child.keys, k) - nonRootMetaDataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) - require.True(t, ok) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - for i := 0; i < len(nonRootMetaDataSlab.childrenHeaders); i++ { - h := nonRootMetaDataSlab.childrenHeaders[i] + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged + + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, 1, getStoredDeltas(storage)) + + // Test inlined grand child slab size + expectedGrandChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - if len(dataSlabInfos) > 0 { - // Update previous slabInfo.count - dataSlabInfos[len(dataSlabInfos)-1].count = int(h.firstKey) - dataSlabInfos[len(dataSlabInfos)-1].startIndex - } + // Test inlined child slab size + expectedChildElementSize := digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - dataSlabInfos = append(dataSlabInfos, &slabInfo{id: h.slabID, startIndex: int(h.firstKey)}) - } - } + // Test parent slab size + expectedParentSize += digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - r := newRand(t) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - for 
len(dataSlabInfos) > 0 { - index := r.Intn(len(dataSlabInfos)) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is 1 stored slab because child map is inlined. - slabToBeRemoved := dataSlabInfos[index] + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // Update startIndex for all subsequence data slabs - for i := index + 1; i < len(dataSlabInfos); i++ { - dataSlabInfos[i].startIndex -= slabToBeRemoved.count - } + // Add 1 more element to each child map so the child map exceeds its max inline size and becomes standalone + for i, child := range children { - err := storage.Remove(slabToBeRemoved.id) - require.NoError(t, err) + childMap := child.m + cValueID := child.valueID - if index == len(dataSlabInfos)-1 { - values = values[:slabToBeRemoved.startIndex] - } else { - copy(values[slabToBeRemoved.startIndex:], values[slabToBeRemoved.startIndex+slabToBeRemoved.count:]) - values = values[:len(values)-slabToBeRemoved.count] - } + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - copy(dataSlabInfos[index:], dataSlabInfos[index+1:]) - dataSlabInfos = dataSlabInfos[:len(dataSlabInfos)-1] + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - verifyMapLoadedElements(t, m, values) - } + child.keys = append(child.keys, k) - require.Equal(t, 0, len(values)) - }) + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - t.Run("root metadata slab with composite values, unload random slab", func(t *testing.T) { + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - storage := newTestPersistentStorage(t) + // Child map is NOT inlined + require.False(t, childMap.Inlined()) + require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is derived from value ID for standalone slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - const mapSize = 500 - m, values := createMapWithCompositeValues( - t, - storage, - address, - typeInfo, - mapSize, - func(i int) []Digest { return []Digest{Digest(i)} }, - ) + // Parent map is standalone + require.False(t, parentMap.Inlined()) + require.Equal(t, (1 + i + 1), getStoredDeltas(storage)) - // parent map (3 levels): 1 root metadata slab, n non-root metadata slabs, n data slabs - // composite values: 1 root data slab for each - require.True(t, len(storage.deltas) > 1+mapSize) - require.True(t, getMapMetaDataSlabCount(storage) > 1) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - verifyMapLoadedElements(t, m, values) + // Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*2 + (digestSize + singleElementPrefixSize + encodedKeySize + expectedGrandChildMapSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - type slabInfo struct 
{ - id SlabID - startIndex int - count int - children []*slabInfo + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) } - rootMetaDataSlab, ok := m.root.(*MapMetaDataSlab) - require.True(t, ok) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) // There is 1+mapSize stored slab because all child maps are standalone. - metadataSlabInfos := make([]*slabInfo, len(rootMetaDataSlab.childrenHeaders)) - for i, mheader := range rootMetaDataSlab.childrenHeaders { + // Test parent slab size + expectedParentSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + (singleElementPrefixSize+digestSize+encodedKeySize+slabIDStorableSize)*mapSize + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) - if i > 0 { - prevMetaDataSlabInfo := metadataSlabInfos[i-1] - prevDataSlabInfo := prevMetaDataSlabInfo.children[len(prevMetaDataSlabInfo.children)-1] + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - // Update previous metadata slab count - prevMetaDataSlabInfo.count = int(mheader.firstKey) - prevMetaDataSlabInfo.startIndex + expectedParentMapSize := parentMap.root.ByteSize() - // Update previous data slab count - prevDataSlabInfo.count = int(mheader.firstKey) - prevDataSlabInfo.startIndex - } + // Remove one element from child map which triggers standalone child map slab becomes inlined slab again. + for _, child := range children { + childMap := child.m + cValueID := child.valueID - metadataSlabInfo := &slabInfo{ - id: mheader.slabID, - startIndex: int(mheader.firstKey), - } + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - nonRootMetadataSlab, ok := storage.deltas[mheader.slabID].(*MapMetaDataSlab) - require.True(t, ok) + // Remove one element + k := child.keys[len(child.keys)-1] + child.keys = child.keys[:len(child.keys)-1] - children := make([]*slabInfo, len(nonRootMetadataSlab.childrenHeaders)) - for i, h := range nonRootMetadataSlab.childrenHeaders { - children[i] = &slabInfo{ - id: h.slabID, - startIndex: int(h.firstKey), - } - if i > 0 { - children[i-1].count = int(h.firstKey) - children[i-1].startIndex - } - } + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - metadataSlabInfo.children = children - metadataSlabInfos[i] = metadataSlabInfo - } + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged - const ( - metadataSlabType int = iota - dataSlabType - maxSlabType - ) + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged - r := newRand(t) + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - for len(metadataSlabInfos) > 0 { + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := 
singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - var slabInfoToBeRemoved *slabInfo - var isLastSlab bool + // Test parent child slab size + expectedParentMapSize = expectedParentMapSize - slabIDStorableSize + expectedChildMapSize + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - switch r.Intn(maxSlabType) { + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - case metadataSlabType: + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - metadataSlabIndex := r.Intn(len(metadataSlabInfos)) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - isLastSlab = metadataSlabIndex == len(metadataSlabInfos)-1 + // remove remaining elements from child map, except for grand child map + for _, child := range children { + childMap := child.m + cValueID := child.valueID - slabInfoToBeRemoved = metadataSlabInfos[metadataSlabIndex] + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - count := slabInfoToBeRemoved.count + // Remove all elements, except grand child map (first element in child.keys) + for _, k := range child.keys[1:] { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent child slab size + expectedParentMapSize -= digestSize + singleElementPrefixSize + encodedKeySize + encodedValueSize + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // Update startIndex for subsequence metadata slabs - for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { - metadataSlabInfos[i].startIndex -= count + require.Equal(t, uint64(1), gchildMap.Count()) + require.Equal(t, uint64(1), 
childMap.Count()) + } - for j := 0; j < len(metadataSlabInfos[i].children); j++ { - metadataSlabInfos[i].children[j].startIndex -= count - } - } + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map and grand child map are inlined. - copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) - metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + }) - case dataSlabType: + t.Run("parent is root metadata slab, with four child maps, each child map has grand child maps", func(t *testing.T) { + const ( + mapSize = 4 + keyStringSize = 4 + valueStringSize = 8 + ) - metadataSlabIndex := r.Intn(len(metadataSlabInfos)) + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() + encodedValueSize := NewStringValue(strings.Repeat("a", valueStringSize)).ByteSize() + slabIDStorableSize := SlabIDStorable(SlabID{}).ByteSize() - metadataSlabInfo := metadataSlabInfos[metadataSlabIndex] + r := newRand(t) - dataSlabIndex := r.Intn(len(metadataSlabInfo.children)) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - isLastSlab = (metadataSlabIndex == len(metadataSlabInfos)-1) && - (dataSlabIndex == len(metadataSlabInfo.children)-1) + getKeyFunc := func() Value { + return NewStringValue(randStr(r, keyStringSize)) + } - slabInfoToBeRemoved = metadataSlabInfo.children[dataSlabIndex] + // Create a parent map, with inlined child map, containing inlined grand child map + parentMap, expectedKeyValues := createMapWithEmpty2LevelChildMap(t, storage, address, typeInfo, mapSize, getKeyFunc) - count := slabInfoToBeRemoved.count + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - // Update startIndex for all subsequence data slabs in this metadata slab info - for i := dataSlabIndex + 1; i < len(metadataSlabInfo.children); i++ { - metadataSlabInfo.children[i].startIndex -= count - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - copy(metadataSlabInfo.children[dataSlabIndex:], metadataSlabInfo.children[dataSlabIndex+1:]) - metadataSlabInfo.children = metadataSlabInfo.children[:len(metadataSlabInfo.children)-1] + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - metadataSlabInfo.count -= count + // Insert 1 element to grand child map + // Both child map and grand child map are still inlined, but parent map's root slab is metadata slab. + for _, child := range children { + childMap := child.m + cValueID := child.valueID - // Update startIndex for all subsequence metadata slabs. 
- for i := metadataSlabIndex + 1; i < len(metadataSlabInfos); i++ { - metadataSlabInfos[i].startIndex -= count + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - for j := 0; j < len(metadataSlabInfos[i].children); j++ { - metadataSlabInfos[i].children[j].startIndex -= count - } - } + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - if len(metadataSlabInfo.children) == 0 { - copy(metadataSlabInfos[metadataSlabIndex:], metadataSlabInfos[metadataSlabIndex+1:]) - metadataSlabInfos = metadataSlabInfos[:len(metadataSlabInfos)-1] - } - } + gchild.keys = append(gchild.keys, k) - err := storage.Remove(slabInfoToBeRemoved.id) + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) + require.Nil(t, existingStorable) - if isLastSlab { - values = values[:slabInfoToBeRemoved.startIndex] - } else { - copy(values[slabInfoToBeRemoved.startIndex:], values[slabInfoToBeRemoved.startIndex+slabInfoToBeRemoved.count:]) - values = values[:len(values)-slabInfoToBeRemoved.count] - } + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged - verifyMapLoadedElements(t, m, values) - } + // Child map is still inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - require.Equal(t, 0, len(values)) - }) -} + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) -func createMapWithLongStringKey( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, -) (*OrderedMap, [][2]Value) { + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - digesterBuilder := &mockDigesterBuilder{} + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - // Create parent map. - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + require.False(t, parentMap.Inlined()) + require.False(t, parentMap.root.IsData()) + // There is 3 stored slab: parent metadata slab with 2 data slabs (all child and grand child maps are inlined) + require.Equal(t, 3, getStoredDeltas(storage)) - expectedValues := make([][2]Value, size) - r := 'a' - for i := 0; i < size; i++ { - s := strings.Repeat(string(r), int(maxInlineMapElementSize)) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - k := NewStringValue(s) - v := Uint64Value(i) + // Insert 1 element to grand child map + // - grand child maps are inlined + // - child maps are standalone + // - parent map's root slab is data slab. 
+ for _, child := range children { + childMap := child.m + cValueID := child.valueID - expectedValues[i] = [2]Value{k, v} + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - digests := []Digest{Digest(i)} - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) + gchild.keys = append(gchild.keys, k) - r++ - } + existingStorable, err := gchildMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - return m, expectedValues -} + // Grand child map is still inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, gValueID, gchildMap.ValueID()) // Value ID is unchanged -func createMapWithSimpleValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { + // Child map is NOT inlined + require.False(t, childMap.Inlined()) + require.Equal(t, valueIDToSlabID(cValueID), childMap.SlabID()) // Slab ID is same as value ID + require.Equal(t, cValueID, childMap.ValueID()) // Value ID is unchanged - digesterBuilder := &mockDigesterBuilder{} + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + // Test standalone child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - expectedValues := make([][2]Value, size) - r := rune('a') - for i := 0; i < size; i++ { - k := Uint64Value(i) - v := NewStringValue(strings.Repeat(string(r), 20)) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + require.False(t, parentMap.Inlined()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1+mapSize, getStoredDeltas(storage)) - expectedValues[i] = [2]Value{k, v} + // Test parent slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + slabIDStorableSize + expectedParentMapSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedParentElementSize*uint32(parentMap.Count()) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) - existingStorable, err := m.Set(compare, hashInputProvider, expectedValues[i][0], expectedValues[i][1]) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - return m, expectedValues -} + // Remove one element from grand child map to trigger child map inlined again. 
+ // - grand child maps are inlined + // - child maps are inlined + // - parent map root slab is metadata slab + for _, child := range children { + childMap := child.m + cValueID := child.valueID -func createMapWithCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { + gchild := child.children[0] + gchildMap := gchild.m + gValueID := gchild.valueID - // Use mockDigesterBuilder to guarantee element order. - digesterBuilder := &mockDigesterBuilder{} + // Remove one element from grand child map + k := gchild.keys[len(gchild.keys)-1] + gchild.keys = gchild.keys[:len(gchild.keys)-1] - // Create parent map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + existingKey, existingValue, err := gchildMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) + + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged + + // Grand child map is inlined + require.True(t, gchildMap.Inlined()) + require.Equal(t, SlabIDUndefined, gchildMap.SlabID()) + require.Equal(t, gValueID, gchildMap.ValueID()) // value ID is unchanged + + // Test inlined grand child slab size + expectedGrandChildElementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedGrandChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedGrandChildElementSize*uint32(gchildMap.Count()) + require.Equal(t, expectedGrandChildMapSize, gchildMap.root.ByteSize()) + + // Test inlined child slab size + expectedChildElementSize1 := singleElementPrefixSize + digestSize + encodedKeySize + expectedGrandChildMapSize + expectedChildElementSize2 := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize1 + expectedChildElementSize2*uint32(childMap.Count()-1) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.False(t, parentMap.root.IsData()) + require.Equal(t, 3, getStoredDeltas(storage)) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + + // Remove all grand child element to trigger + // - child maps are inlined + // - parent map root slab is data slab + for _, child := range children { + childMap := child.m + cValueID := child.valueID + + // Remove grand children + for _, k := range child.keys { + existingKey, existingValue, err := childMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.Equal(t, k, existingKey) + require.NotNil(t, existingValue) - expectedValues := make([][2]Value, size) - for i := 0; i < size; i++ { - // Create nested array - nested, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Child map is inlined + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) + require.Equal(t, cValueID, childMap.ValueID()) // value ID is unchanged - err = nested.Append(Uint64Value(i)) - require.NoError(t, err) + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } - k := 
Uint64Value(i) - v := nested + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - expectedValues[i] = [2]Value{k, v} + require.Equal(t, uint64(0), childMap.Count()) + } - //digests := []Digest{Digest(i)} - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) - // Set nested array to parent - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - return m, expectedValues + expectedChildMapSize := uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) + expectedParentMapSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + + (digestSize+singleElementPrefixSize+encodedKeySize+expectedChildMapSize)*uint32(mapSize) + require.Equal(t, expectedParentMapSize, parentMap.root.ByteSize()) + }) } -func createMapWithSimpleAndCompositeValues( - t *testing.T, - storage SlabStorage, - address Address, - typeInfo TypeInfo, - size int, - compositeValueIndex int, - newDigests func(i int) []Digest, -) (*OrderedMap, [][2]Value) { +func TestChildMapWhenParentMapIsModified(t *testing.T) { + const ( + mapSize = 2 + keyStringSize = 4 + valueStringSize = 4 + expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 + ) + + // encoded key size is the same for all string keys of the same length. + encodedKeySize := NewStringValue(strings.Repeat("a", keyStringSize)).ByteSize() - digesterBuilder := &mockDigesterBuilder{} + r := newRand(t) - // Create parent map - m, err := NewMap(storage, address, digesterBuilder, typeInfo) - require.NoError(t, err) + typeInfo := testTypeInfo{42} + storage := newTestPersistentStorage(t) + address := Address{1, 2, 3, 4, 5, 6, 7, 8} - values := make([][2]Value, size) - r := 'a' - for i := 0; i < size; i++ { + parentMapDigesterBuilder := &mockDigesterBuilder{} + parentDigest := 1 - k := Uint64Value(i) + // Create parent map with mock digests + parentMap, err := NewMap(storage, address, parentMapDigesterBuilder, typeInfo) + require.NoError(t, err) - digests := newDigests(i) - digesterBuilder.On("Digest", k).Return(mockDigester{digests}) + expectedKeyValues := make(map[Value]Value) - if compositeValueIndex == i { - // Create nested array with one element - a, err := NewArray(storage, address, typeInfo) - require.NoError(t, err) + // Insert 2 child map with digest values of 1 and 3. 
+ for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - err = a.Append(Uint64Value(i)) - require.NoError(t, err) + k := NewStringValue(randStr(r, keyStringSize)) - values[i] = [2]Value{k, a} - } else { - values[i] = [2]Value{k, NewStringValue(strings.Repeat(string(r), 18))} + digests := []Digest{ + Digest(parentDigest), } + parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests}) + parentDigest += 2 - existingStorable, err := m.Set(compare, hashInputProvider, values[i][0], values[i][1]) + // Insert child map to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) require.Nil(t, existingStorable) - } - return m, values -} + expectedKeyValues[k] = childMap -func verifyMapLoadedElements(t *testing.T, m *OrderedMap, expectedValues [][2]Value) { - i := 0 - err := m.IterateLoadedValues(func(k Value, v Value) (bool, error) { - require.True(t, i < len(expectedValues)) - valueEqual(t, typeInfoComparator, expectedValues[i][0], k) - valueEqual(t, typeInfoComparator, expectedValues[i][1], v) - i++ - return true, nil - }) - require.NoError(t, err) - require.Equal(t, len(expectedValues), i) -} + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) -func getMapMetaDataSlabCount(storage *PersistentSlabStorage) int { - var counter int - for _, slab := range storage.deltas { - if _, ok := slab.(*MapMetaDataSlab); ok { - counter++ - } + // Test child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize()) + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + encodedKeySize + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) } - return counter -} -func TestMaxInlineMapValueSize(t *testing.T) { + require.Equal(t, uint64(mapSize), parentMap.Count()) + require.True(t, parentMap.root.IsData()) + require.Equal(t, 1, getStoredDeltas(storage)) // There is only 1 stored slab because child map is inlined. - t.Run("small key", func(t *testing.T) { - // Value has larger max inline size when key is less than max map key size. + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) - SetThreshold(256) - defer SetThreshold(1024) + children := getInlinedChildMapsFromParentMap(t, address, parentMap) + require.Equal(t, mapSize, len(children)) - mapSize := 2 - keyStringSize := 16 // Key size is less than max map key size. - valueStringSize := maxInlineMapElementSize/2 + 10 // Value size is more than half of max map element size. 
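// Illustrative aside, not in the patch itself: the slab-size assertions in
// these tests all follow one formula. Assuming every element encodes to the
// same key and value sizes, an inlined child map's root slab size is the
// inlined prefix, plus the hkey elements prefix, plus a per-element cost.
// expectedInlinedMapSlabSize is a hypothetical helper capturing that arithmetic:
func expectedInlinedMapSlabSize(count, encodedKeySize, encodedValueSize uint32) uint32 {
	elementSize := singleElementPrefixSize + digestSize + encodedKeySize + encodedValueSize
	return inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + elementSize*count
}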
+ var keysForNonChildMaps []Value - r := newRand(t) + t.Run("insert elements in parent map", func(t *testing.T) { - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, keyStringSize)) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v + newDigests := []Digest{ + 0, // insert value at digest 0, so all child map physical positions are moved by +1 + 2, // insert value at digest 2, so second child map physical positions are moved by +1 + 4, // insert value at digest 4, so no child map physical positions are moved } - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + for _, digest := range newDigests { - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + k := NewStringValue(randStr(r, keyStringSize)) + v := NewStringValue(randStr(r, valueStringSize)) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + digests := []Digest{digest} + parentMapDigesterBuilder.On("Digest", k).Return(mockDigester{digests}) + + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, v) require.NoError(t, err) require.Nil(t, existingStorable) - } - // Both key and value are stored in map slab. - require.Equal(t, 1, len(storage.deltas)) + expectedKeyValues[k] = v + keysForNonChildMaps = append(keysForNonChildMaps, k) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) - }) + for i, child := range children { + childMap := child.m + childValueID := child.valueID - t.Run("max size key", func(t *testing.T) { - // Value max size is about half of max map element size when key is exactly max map key size. + k := NewStringValue(randStr(r, keyStringSize)) + v := Uint64Value(i) - SetThreshold(256) - defer SetThreshold(1024) + existingStorable, err = childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) - mapSize := 1 - keyStringSize := maxInlineMapKeySize - 2 // Key size is exactly max map key size (2 bytes is string encoding overhead). - valueStringSize := maxInlineMapElementSize/2 + 2 // Value size is more than half of max map element size (add 2 bytes to make it more than half). + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged - r := newRand(t) + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize() + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, int(keyStringSize))) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } } - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + t.Run("remove elements from parent map", func(t *testing.T) { + // Remove element at digest 0, so all child map physical position are moved by -1. 
+ // Remove element at digest 2, so only second child map physical position is moved by -1 + // Remove element at digest 4, so no child map physical position is moved by -1 - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + for _, k := range keysForNonChildMaps { - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + existingKey, existingValue, err := parentMap.Remove(compare, hashInputProvider, k) + require.NoError(t, err) + require.NotNil(t, existingKey) + require.NotNil(t, existingValue) - // Key is stored in map slab, while value is stored separately in storable slab. - require.Equal(t, 2, len(storage.deltas)) + delete(expectedKeyValues, k) - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) + for i, child := range children { + childMap := child.m + childValueID := child.valueID + + k := NewStringValue(randStr(r, keyStringSize)) + v := Uint64Value(i) + + existingStorable, err := childMap.Set(compare, hashInputProvider, k, v) + require.NoError(t, err) + require.Nil(t, existingStorable) + + require.True(t, childMap.Inlined()) + require.Equal(t, SlabIDUndefined, childMap.SlabID()) // Slab ID is undefined for inlined slab + require.Equal(t, childValueID, childMap.ValueID()) // Value ID is unchanged + + // Test inlined child slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + k.ByteSize() + v.ByteSize() + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + + expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + verifyMap(t, storage, typeInfo, address, parentMap, expectedKeyValues, nil, true) + } + } + }) }) +} - t.Run("large key", func(t *testing.T) { - // Value has larger max inline size when key is more than max map key size because - // when key size exceeds max map key size, it is stored in a separate storable slab, - // and SlabIDStorable is stored as key in the map, which is 19 bytes. 
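// Illustrative aside, not in the patch itself: the 19-byte figure in the
// removed comment above is the encoded size of a SlabIDStorable used as a map
// key: a 2-byte CBOR tag head wrapping a 16-byte byte string (1-byte head +
// 8-byte address + 8-byte index). The constant name below is hypothetical:
const encodedSlabIDStorableSize = 2 + 1 + 8 + 8 // = 19 bytes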
+func createMapWithEmptyChildMap( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + mapSize int, + getKey func() Value, +) (*OrderedMap, map[Value]Value) { - SetThreshold(256) - defer SetThreshold(1024) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 - mapSize := 1 - keyStringSize := maxInlineMapKeySize + 10 // key size is more than max map key size - valueStringSize := maxInlineMapElementSize/2 + 10 // value size is more than half of max map element size + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - r := newRand(t) + expectedKeyValues := make(map[Value]Value) - keyValues := make(map[Value]Value, mapSize) - for len(keyValues) < mapSize { - k := NewStringValue(randStr(r, int(keyStringSize))) - v := NewStringValue(randStr(r, int(valueStringSize))) - keyValues[k] = v - } + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + k := getKey() - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + ks, err := k.Storable(storage, address, maxInlineMapElementSize) require.NoError(t, err) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) - require.NoError(t, err) - require.Nil(t, existingStorable) - } + // Insert child map to parent map + existingStorable, err := parentMap.Set(compare, hashInputProvider, k, childMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - // Key is stored in separate storable slabs, while value is stored in map slab. 
- require.Equal(t, 2, len(storage.deltas)) + expectedKeyValues[k] = childMap - verifyMap(t, storage, typeInfo, address, m, keyValues, nil, false) - }) + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) + + // Test child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, childMap.root.ByteSize()) + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) + } + + return parentMap, expectedKeyValues } -func TestMapID(t *testing.T) { - typeInfo := testTypeInfo{42} - storage := newTestPersistentStorage(t) - address := Address{1, 2, 3, 4, 5, 6, 7, 8} +func createMapWithEmpty2LevelChildMap( + t *testing.T, + storage SlabStorage, + address Address, + typeInfo TypeInfo, + mapSize int, + getKey func() Value, +) (*OrderedMap, map[Value]Value) { - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + const expectedEmptyInlinedMapSize = uint32(inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize) // 22 + + // Create parent map + parentMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) require.NoError(t, err) - sid := m.SlabID() - id := m.ValueID() + expectedKeyValues := make(map[Value]Value) - require.Equal(t, sid.address[:], id[:8]) - require.Equal(t, sid.index[:], id[8:]) -} + for i := 0; i < mapSize; i++ { + // Create child map + childMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) -func TestSlabSizeWhenResettingMutableStorableInMap(t *testing.T) { - const ( - mapSize = 3 - keyStringSize = 16 - initialStorableSize = 1 - mutatedStorableSize = 5 - ) + // Create grand child map + gchildMap, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) + require.NoError(t, err) - keyValues := make(map[Value]*mutableValue, mapSize) - for i := 0; i < mapSize; i++ { - k := Uint64Value(i) - v := newMutableValue(initialStorableSize) - keyValues[k] = v - } + k := getKey() - typeInfo := testTypeInfo{42} - address := Address{1, 2, 3, 4, 5, 6, 7, 8} - storage := newTestPersistentStorage(t) + ks, err := k.Storable(storage, address, maxInlineMapElementSize) + require.NoError(t, err) - m, err := NewMap(storage, address, newBasicDigesterBuilder(), typeInfo) - require.NoError(t, err) + // Insert grand child map to child map + existingStorable, err := childMap.Set(compare, hashInputProvider, k, gchildMap) + require.NoError(t, err) + require.Nil(t, existingStorable) - for k, v := range keyValues { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + require.True(t, gchildMap.Inlined()) + testInlinedMapIDs(t, address, gchildMap) + + // Insert child map to parent map + existingStorable, err = parentMap.Set(compare, hashInputProvider, k, childMap) require.NoError(t, err) require.Nil(t, existingStorable) + + expectedKeyValues[k] = childMap + + require.True(t, childMap.Inlined()) + testInlinedMapIDs(t, address, childMap) + + // Test grand child map slab size + require.Equal(t, expectedEmptyInlinedMapSize, gchildMap.root.ByteSize()) + + // Test child map slab size + expectedChildElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedEmptyInlinedMapSize + expectedChildMapSize := inlinedMapDataSlabPrefixSize + hkeyElementsPrefixSize + 
+ expectedChildElementSize*uint32(childMap.Count()) + require.Equal(t, expectedChildMapSize, childMap.root.ByteSize()) + + // Test parent map slab size + expectedParentElementSize := singleElementPrefixSize + digestSize + ks.ByteSize() + expectedChildMapSize + expectedParentSize := uint32(mapRootDataSlabPrefixSize+hkeyElementsPrefixSize) + // standalone map data slab with 0 element + expectedParentElementSize*uint32(i+1) + require.Equal(t, expectedParentSize, parentMap.root.ByteSize()) } - require.True(t, m.root.IsData()) + testNotInlinedMapIDs(t, address, parentMap) - expectedElementSize := singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + initialStorableSize - expectedMapRootDataSlabSize := mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize - require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) + return parentMap, expectedKeyValues +} - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) - require.NoError(t, err) +type mapInfo struct { + m *OrderedMap + valueID ValueID + keys []Value + children []*mapInfo +} - // Reset mutable values after changing its storable size - for k, v := range keyValues { - v.updateStorableSize(mutatedStorableSize) +func getInlinedChildMapsFromParentMap(t *testing.T, address Address, parentMap *OrderedMap) []*mapInfo { - existingStorable, err := m.Set(compare, hashInputProvider, k, v) + children := make([]*mapInfo, 0, parentMap.Count()) + + err := parentMap.IterateKeys(func(k Value) (bool, error) { + if k == nil { + return false, nil + } + + e, err := parentMap.Get(compare, hashInputProvider, k) require.NoError(t, err) - require.NotNil(t, existingStorable) - } - require.True(t, m.root.IsData()) + childMap, ok := e.(*OrderedMap) + if !ok { + return true, nil + } - expectedElementSize = singleElementPrefixSize + digestSize + Uint64Value(0).ByteSize() + mutatedStorableSize - expectedMapRootDataSlabSize = mapRootDataSlabPrefixSize + hkeyElementsPrefixSize + expectedElementSize*mapSize - require.Equal(t, expectedMapRootDataSlabSize, m.root.ByteSize()) + if childMap.Inlined() { + testInlinedMapIDs(t, address, childMap) + } else { + testNotInlinedMapIDs(t, address, childMap) + } - err = ValidMap(m, typeInfo, typeInfoComparator, hashInputProvider) + var childKeys []Value + err = childMap.IterateKeys(func(key Value) (bool, error) { + if key == nil { + return false, nil + } + childKeys = append(childKeys, key) + return true, nil + }) + require.NoError(t, err) + + children = append(children, &mapInfo{ + m: childMap, + valueID: childMap.ValueID(), + keys: childKeys, + children: getInlinedChildMapsFromParentMap(t, address, childMap), + }) + + return true, nil + }) require.NoError(t, err) + + return children } diff --git a/storable.go b/storable.go index 2d19fefd..c0102a9c 100644 --- a/storable.go +++ b/storable.go @@ -37,6 +37,14 @@ type Storable interface { ChildStorables() []Storable } +// EquatableStorable is an interface that supports comparison of Storable. +// This is only used for composite keys. +type EquatableStorable interface { + Storable + // Equal returns true if the given storable is equal to this storable. 
+ Equal(Storable) bool +} + type containerStorable interface { Storable hasPointer() bool @@ -50,6 +58,14 @@ func hasPointer(storable Storable) bool { } const ( + CBORTagInlinedArrayExtraData = 247 + CBORTagInlinedMapExtraData = 248 + CBORTagInlinedCompositeExtraData = 249 + + CBORTagInlinedArray = 250 + CBORTagInlinedMap = 251 + CBORTagInlinedComposite = 252 + CBORTagInlineCollisionGroup = 253 CBORTagExternalCollisionGroup = 254 @@ -59,6 +75,7 @@ const ( type SlabIDStorable SlabID var _ Storable = SlabIDStorable{} +var _ containerStorable = SlabIDStorable{} func (v SlabIDStorable) hasPointer() bool { return true diff --git a/storable_test.go b/storable_test.go index 9f4d6ece..77e2b4b4 100644 --- a/storable_test.go +++ b/storable_test.go @@ -345,6 +345,13 @@ func (v StringValue) StoredValue(_ SlabStorage) (Value, error) { return v, nil } +func (v StringValue) Equal(other Storable) bool { + if _, ok := other.(StringValue); !ok { + return false + } + return v.str == other.(StringValue).str +} + func (v StringValue) Storable(storage SlabStorage, address Address, maxInlineSize uint64) (Storable, error) { if uint64(v.ByteSize()) > maxInlineSize { @@ -430,7 +437,7 @@ func (v StringValue) String() string { return v.str } -func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { +func decodeStorable(dec *cbor.StreamDecoder, id SlabID, inlinedExtraData []ExtraData) (Storable, error) { t, err := dec.NextType() if err != nil { return nil, err @@ -451,6 +458,15 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { } switch tagNumber { + case CBORTagInlinedArray: + return DecodeInlinedArrayStorable(dec, decodeStorable, id, inlinedExtraData) + + case CBORTagInlinedMap: + return DecodeInlinedMapStorable(dec, decodeStorable, id, inlinedExtraData) + + case CBORTagInlinedComposite: + return DecodeInlinedCompositeStorable(dec, decodeStorable, id, inlinedExtraData) + case CBORTagSlabID: return DecodeSlabIDStorable(dec) @@ -492,7 +508,7 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { return Uint64Value(n), nil case cborTagSomeValue: - storable, err := decodeStorable(dec, id) + storable, err := decodeStorable(dec, id, inlinedExtraData) if err != nil { return nil, err } @@ -507,12 +523,43 @@ func decodeStorable(dec *cbor.StreamDecoder, id SlabID) (Storable, error) { } func decodeTypeInfo(dec *cbor.StreamDecoder) (TypeInfo, error) { - value, err := dec.DecodeUint64() + t, err := dec.NextType() if err != nil { return nil, err } - return testTypeInfo{value: value}, nil + switch t { + case cbor.UintType: + value, err := dec.DecodeUint64() + if err != nil { + return nil, err + } + + return testTypeInfo{value: value}, nil + + case cbor.TagType: + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return nil, err + } + + switch tagNum { + case testCompositeTypeInfoTagNum: + value, err := dec.DecodeUint64() + if err != nil { + return nil, err + } + + return testCompositeTypeInfo{value: value}, nil + + default: + return nil, fmt.Errorf("failed to decode type info") + } + + default: + return nil, fmt.Errorf("failed to decode type info") + } + } func compare(storage SlabStorage, value Value, storable Storable) (bool, error) { @@ -677,25 +724,25 @@ func (v SomeStorable) String() string { return fmt.Sprintf("%s", v.Storable) } -type mutableValue struct { +type testMutableValue struct { storable *mutableStorable } -var _ Value = &mutableValue{} +var _ Value = &testMutableValue{} -func newMutableValue(storableSize uint32) *mutableValue { - return 
&mutableValue{ +func newTestMutableValue(storableSize uint32) *testMutableValue { + return &testMutableValue{ storable: &mutableStorable{ size: storableSize, }, } } -func (v *mutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) { +func (v *testMutableValue) Storable(SlabStorage, Address, uint64) (Storable, error) { return v.storable, nil } -func (v *mutableValue) updateStorableSize(n uint32) { +func (v *testMutableValue) updateStorableSize(n uint32) { v.storable.size = n } @@ -710,7 +757,7 @@ func (s *mutableStorable) ByteSize() uint32 { } func (s *mutableStorable) StoredValue(SlabStorage) (Value, error) { - return &mutableValue{s}, nil + return &testMutableValue{s}, nil } func (*mutableStorable) ChildStorables() []Storable { diff --git a/storage.go b/storage.go index 005e69fd..7deb5b60 100644 --- a/storage.go +++ b/storage.go @@ -34,6 +34,13 @@ const LedgerBaseStorageSlabPrefix = "$" // ValueID identifies Array and OrderedMap. type ValueID [16]byte +func slabIDToValueID(sid SlabID) ValueID { + var id ValueID + copy(id[:], sid.address[:]) + copy(id[8:], sid.index[:]) + return id +} + type ( Address [8]byte SlabIndex [8]byte @@ -448,6 +455,8 @@ func CheckStorageHealth(storage SlabStorage, expectedNumberOfRootSlabs int) (map atLeastOneExternalSlab = true } + // This handles inlined slab because inlined slab is a child storable (s) and + // we traverse s.ChildStorables() for its inlined elements. next = append(next, s.ChildStorables()...) } @@ -574,6 +583,11 @@ func (s *PersistentSlabStorage) SlabIterator() (SlabIterator, error) { slabIDStorable, ok := childStorable.(SlabIDStorable) if !ok { + // Append child storables of this childStorable to handle inlined slab containing SlabIDStorable. + nextChildStorables = append( + nextChildStorables, + childStorable.ChildStorables()..., + ) continue } @@ -989,12 +1003,18 @@ func (s *PersistentSlabStorage) Retrieve(id SlabID) (Slab, bool, error) { } func (s *PersistentSlabStorage) Store(id SlabID, slab Slab) error { + if id == SlabIDUndefined { + return NewSlabIDError("failed to store slab with undefined slab ID") + } // add to deltas s.deltas[id] = slab return nil } func (s *PersistentSlabStorage) Remove(id SlabID) error { + if id == SlabIDUndefined { + return NewSlabIDError("failed to remove slab with undefined slab ID") + } // add to nil to deltas under that id s.deltas[id] = nil return nil diff --git a/storage_test.go b/storage_test.go index 40a4e6c8..2cd2a929 100644 --- a/storage_test.go +++ b/storage_test.go @@ -900,7 +900,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) { data := map[SlabID][]byte{ // (metadata slab) headers: [{id:2 size:228 count:9} {id:3 size:270 count:11} ] id1: { - // extra data // version 0x10, // extra data flag @@ -970,7 +969,6 @@ func TestPersistentStorageSlabIterator(t *testing.T) { // (data slab) next: 0, data: [0] id4: { - // extra data // version 0x10, // extra data flag diff --git a/typeinfo.go b/typeinfo.go index 35eb718d..7241fa15 100644 --- a/typeinfo.go +++ b/typeinfo.go @@ -19,11 +19,17 @@ package atree import ( + "encoding/binary" + "fmt" + "github.com/fxamacker/cbor/v2" ) type TypeInfo interface { Encode(*cbor.StreamEncoder) error + IsComposite() bool + ID() string + // TODO: maybe add a copy function because decoded TypeInfo can be shared by multiple slabs if not copied. 
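	// (Editor's note, not in the patch: ID() is the deduplication key used by
	// the new inlinedExtraData cache added below in this file; array and
	// composite extra data are shared across inlined slabs with the same ID.)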
}
 
 type TypeInfoDecoder func(
@@ -32,3 +38,340 @@
 	TypeInfo,
 	error,
 )
+
+type ExtraData interface {
+	isExtraData() bool
+	Encode(enc *Encoder) error
+}
+
+// compositeExtraData is used for inlining composite values.
+// compositeExtraData includes hkeys and keys with map extra data
+// because hkeys and keys are the same in order and content for
+// all values with the same composite type and map seed.
+type compositeExtraData struct {
+	mapExtraData *MapExtraData
+	hkeys        []Digest // hkeys is ordered by mapExtraData.Seed
+	keys         []MapKey // keys is ordered by mapExtraData.Seed
+}
+
+var _ ExtraData = &compositeExtraData{}
+
+const compositeExtraDataLength = 3
+
+func (c *compositeExtraData) isExtraData() bool {
+	return true
+}
+
+func (c *compositeExtraData) Encode(enc *Encoder) error {
+	err := enc.CBOR.EncodeArrayHead(compositeExtraDataLength)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 0: map extra data
+	err = c.mapExtraData.Encode(enc)
+	if err != nil {
+		return err
+	}
+
+	// element 1: digests
+	totalDigestSize := len(c.hkeys) * digestSize
+
+	var digests []byte
+	if totalDigestSize <= len(enc.Scratch) {
+		digests = enc.Scratch[:totalDigestSize]
+	} else {
+		digests = make([]byte, totalDigestSize)
+	}
+
+	for i := 0; i < len(c.hkeys); i++ {
+		binary.BigEndian.PutUint64(digests[i*digestSize:], uint64(c.hkeys[i]))
+	}
+
+	err = enc.CBOR.EncodeBytes(digests)
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	// element 2: field names
+	err = enc.CBOR.EncodeArrayHead(uint64(len(c.keys)))
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	for _, key := range c.keys {
+		err = key.Encode(enc)
+		if err != nil {
+			return NewEncodingError(err)
+		}
+	}
+
+	err = enc.CBOR.Flush()
+	if err != nil {
+		return NewEncodingError(err)
+	}
+
+	return nil
+}
+
+func newCompositeExtraData(
+	dec *cbor.StreamDecoder,
+	decodeTypeInfo TypeInfoDecoder,
+	decodeStorable StorableDecoder,
+) (*compositeExtraData, error) {
+
+	length, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if length != compositeExtraDataLength {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"composite extra data has invalid length %d, want %d",
+				length,
+				compositeExtraDataLength,
+			))
+	}
+
+	// element 0: map extra data
+	mapExtraData, err := newMapExtraData(dec, decodeTypeInfo)
+	if err != nil {
+		return nil, err
+	}
+
+	// element 1: digests
+	digestBytes, err := dec.DecodeBytes()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if len(digestBytes)%digestSize != 0 {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"decoding digests failed: number of bytes %d is not multiple of %d",
+				len(digestBytes),
+				digestSize))
+	}
+
+	digestCount := len(digestBytes) / digestSize
+
+	// element 2: keys
+	keyCount, err := dec.DecodeArrayHead()
+	if err != nil {
+		return nil, NewDecodingError(err)
+	}
+
+	if keyCount != uint64(digestCount) {
+		return nil, NewDecodingError(
+			fmt.Errorf(
+				"decoding composite keys failed: number of keys %d is different from number of digests %d",
+				keyCount,
+				digestCount))
+	}
+
+	hkeys := make([]Digest, digestCount)
+	for i := 0; i < digestCount; i++ {
+		hkeys[i] = Digest(binary.BigEndian.Uint64(digestBytes[i*digestSize:]))
+	}
+
+	keys := make([]MapKey, keyCount)
+	for i := uint64(0); i < keyCount; i++ {
+		// Decode composite key
+		key, err := decodeStorable(dec, SlabIDUndefined, nil)
+		if err != nil {
+			// Wrap err as external error (if needed) because err is returned by StorableDecoder callback.
+ return nil, wrapErrorfAsExternalErrorIfNeeded(err, "failed to decode key's storable") + } + keys[i] = key + } + + return &compositeExtraData{mapExtraData: mapExtraData, hkeys: hkeys, keys: keys}, nil +} + +type compositeTypeID struct { + id string + fieldCount int +} + +type compositeTypeInfo struct { + index int + keys []MapKey +} + +type inlinedExtraData struct { + extraData []ExtraData + compositeTypes map[compositeTypeID]compositeTypeInfo + arrayTypes map[string]int +} + +func newInlinedExtraData() *inlinedExtraData { + return &inlinedExtraData{ + compositeTypes: make(map[compositeTypeID]compositeTypeInfo), + arrayTypes: make(map[string]int), + } +} + +// Encode encodes inlined extra data as CBOR array. +func (ied *inlinedExtraData) Encode(enc *Encoder) error { + err := enc.CBOR.EncodeArrayHead(uint64(len(ied.extraData))) + if err != nil { + return NewEncodingError(err) + } + + var tagNum uint64 + + for _, extraData := range ied.extraData { + switch extraData.(type) { + case *ArrayExtraData: + tagNum = CBORTagInlinedArrayExtraData + + case *MapExtraData: + tagNum = CBORTagInlinedMapExtraData + + case *compositeExtraData: + tagNum = CBORTagInlinedCompositeExtraData + + default: + return NewEncodingError(fmt.Errorf("failed to encode unsupported extra data type %T", extraData)) + } + + err = enc.CBOR.EncodeTagHead(tagNum) + if err != nil { + return NewEncodingError(err) + } + + err = extraData.Encode(enc) + if err != nil { + return err + } + } + + err = enc.CBOR.Flush() + if err != nil { + return NewEncodingError(err) + } + + return nil +} + +func newInlinedExtraDataFromData( + data []byte, + decMode cbor.DecMode, + decodeStorable StorableDecoder, + decodeTypeInfo TypeInfoDecoder, +) ([]ExtraData, []byte, error) { + + dec := decMode.NewByteStreamDecoder(data) + + count, err := dec.DecodeArrayHead() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + if count == 0 { + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: expect at least one inlined extra data")) + } + + inlinedExtraData := make([]ExtraData, count) + for i := uint64(0); i < count; i++ { + tagNum, err := dec.DecodeTagNumber() + if err != nil { + return nil, nil, NewDecodingError(err) + } + + switch tagNum { + case CBORTagInlinedArrayExtraData: + inlinedExtraData[i], err = newArrayExtraData(dec, decodeTypeInfo) + if err != nil { + return nil, nil, err + } + + case CBORTagInlinedMapExtraData: + inlinedExtraData[i], err = newMapExtraData(dec, decodeTypeInfo) + if err != nil { + return nil, nil, err + } + + case CBORTagInlinedCompositeExtraData: + inlinedExtraData[i], err = newCompositeExtraData(dec, decodeTypeInfo, decodeStorable) + if err != nil { + return nil, nil, err + } + + default: + return nil, nil, NewDecodingError(fmt.Errorf("failed to decode inlined extra data: unsupported tag number %d", tagNum)) + } + } + + return inlinedExtraData, data[dec.NumBytesDecoded():], nil +} + +// addArrayExtraData returns index of deduplicated array extra data. +// Array extra data is deduplicated by array type info ID because array +// extra data only contains type info. +func (ied *inlinedExtraData) addArrayExtraData(data *ArrayExtraData) int { + id := data.TypeInfo.ID() + index, exist := ied.arrayTypes[id] + if exist { + return index + } + + index = len(ied.extraData) + ied.extraData = append(ied.extraData, data) + ied.arrayTypes[id] = index + return index +} + +// addMapExtraData returns index of map extra data. 
+// Map extra data is not deduplicated because it also contains count and seed.
+func (ied *inlinedExtraData) addMapExtraData(data *MapExtraData) int {
+	index := len(ied.extraData)
+	ied.extraData = append(ied.extraData, data)
+	return index
+}
+
+// addCompositeExtraData returns index of deduplicated composite extra data.
+// Composite extra data is deduplicated by TypeInfo.ID() and number of fields.
+// Composite fields can be removed but new fields can't be added, and existing field types can't be modified.
+// Given this, composites with same type ID and same number of fields have the same fields.
+// See https://developers.flow.com/cadence/language/contract-updatability#fields
+func (ied *inlinedExtraData) addCompositeExtraData(data *MapExtraData, digests []Digest, keys []MapKey) int {
+	id := compositeTypeID{data.TypeInfo.ID(), int(data.Count)}
+	info, exist := ied.compositeTypes[id]
+	if exist {
+		return info.index
+	}
+
+	compositeData := &compositeExtraData{
+		mapExtraData: data,
+		hkeys:        digests,
+		keys:         keys,
+	}
+
+	index := len(ied.extraData)
+	ied.extraData = append(ied.extraData, compositeData)
+
+	ied.compositeTypes[id] = compositeTypeInfo{
+		keys:  keys,
+		index: index,
+	}
+
+	return index
+}
+
+// getCompositeTypeInfo returns index of composite type and cached keys.
+// NOTE: use this function instead of addCompositeExtraData to check if
+// composite type is already added to save some allocation.
+func (ied *inlinedExtraData) getCompositeTypeInfo(t TypeInfo, fieldCount int) (int, []MapKey, bool) {
+	id := compositeTypeID{t.ID(), fieldCount}
+	info, exist := ied.compositeTypes[id]
+	if !exist {
+		return 0, nil, false
+	}
+	return info.index, info.keys, true
+}
+
+func (ied *inlinedExtraData) empty() bool {
+	return len(ied.extraData) == 0
+}
diff --git a/utils_test.go b/utils_test.go
index a40a3599..90b7bda2 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -20,7 +20,9 @@ package atree
 
 import (
 	"flag"
+	"fmt"
 	"math/rand"
+	"reflect"
 	"testing"
 	"time"
 
@@ -91,6 +93,14 @@ type testTypeInfo struct {
 
 var _ TypeInfo = testTypeInfo{}
 
+func (i testTypeInfo) IsComposite() bool {
+	return false
+}
+
+func (i testTypeInfo) ID() string {
+	return fmt.Sprintf("uint64(%d)", i.value)
+}
+
 func (i testTypeInfo) Encode(enc *cbor.StreamEncoder) error {
 	return enc.EncodeUint64(i.value)
 }
@@ -100,13 +110,46 @@ func (i testTypeInfo) Equal(other TypeInfo) bool {
 	return ok && i.value == otherTestTypeInfo.value
 }
 
+const testCompositeTypeInfoTagNum = 246
+
+type testCompositeTypeInfo struct {
+	value uint64
+}
+
+var _ TypeInfo = testCompositeTypeInfo{}
+
+func (i testCompositeTypeInfo) IsComposite() bool {
+	return true
+}
+
+func (i testCompositeTypeInfo) ID() string {
+	return fmt.Sprintf("composite(%d)", i.value)
+}
+
+func (i testCompositeTypeInfo) Encode(enc *cbor.StreamEncoder) error {
+	err := enc.EncodeTagHead(testCompositeTypeInfoTagNum)
+	if err != nil {
+		return err
+	}
+	return enc.EncodeUint64(i.value)
+}
+
+func (i testCompositeTypeInfo) Equal(other TypeInfo) bool {
+	otherTestTypeInfo, ok := other.(testCompositeTypeInfo)
+	return ok && i.value == otherTestTypeInfo.value
+}
+
 func typeInfoComparator(a, b TypeInfo) bool {
-	x, ok := a.(testTypeInfo)
-	if !ok {
+	switch x := a.(type) {
+	case testTypeInfo:
+		return x.Equal(b)
+
+	case testCompositeTypeInfo:
+		return x.Equal(b)
+
+	default:
 		return false
 	}
-	y, ok := b.(testTypeInfo)
-	return ok && x.value == y.value
 }
 
 func newTestPersistentStorage(t testing.TB) *PersistentSlabStorage {
@@ -323,21 +366,83 @@ func mapEqual(t *testing.T, tic 
TypeInfoComparator, a Value, b Value) { iterator1, err := m1.Iterator() require.NoError(t, err) - iterator2, err := m2.Iterator() - require.NoError(t, err) + if m1.Type().IsComposite() { + // Check element by key for composite type because + // composite fields can be rearranged to reuse seed and digests. - for { - key1, value1, err := iterator1.Next() - require.NoError(t, err) + for { + key1, value1, err := iterator1.Next() + require.NoError(t, err) + + if key1 == nil { + break + } + + iterator2, err := m2.Iterator() + require.NoError(t, err) + + var value2 Value + for { + key, value, err := iterator2.Next() + require.NoError(t, err) + require.NotNil(t, key) + + if reflect.DeepEqual(key, key1) { + value2 = value + break + } + } - key2, value2, err := iterator2.Next() + valueEqual(t, tic, value1, value2) + } + } else { + + iterator2, err := m2.Iterator() require.NoError(t, err) - valueEqual(t, tic, key1, key2) - valueEqual(t, tic, value1, value2) + for { + key1, value1, err := iterator1.Next() + require.NoError(t, err) - if key1 == nil || key2 == nil { - break + key2, value2, err := iterator2.Next() + require.NoError(t, err) + + valueEqual(t, tic, key1, key2) + valueEqual(t, tic, value1, value2) + + if key1 == nil || key2 == nil { + break + } } } } + +func valueIDToSlabID(vid ValueID) SlabID { + var id SlabID + copy(id.address[:], vid[:slabAddressSize]) + copy(id.index[:], vid[slabAddressSize:]) + return id +} + +func testInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) { + testInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID()) +} + +func testNotInlinedMapIDs(t *testing.T, address Address, m *OrderedMap) { + testNotInlinedSlabIDAndValueID(t, address, m.SlabID(), m.ValueID()) +} + +func testInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) { + require.Equal(t, SlabIDUndefined, slabID) + + require.Equal(t, expectedAddress[:], valueID[:slabAddressSize]) + require.NotEqual(t, SlabIndexUndefined[:], valueID[slabAddressSize:]) +} + +func testNotInlinedSlabIDAndValueID(t *testing.T, expectedAddress Address, slabID SlabID, valueID ValueID) { + require.Equal(t, expectedAddress, slabID.address) + require.NotEqual(t, SlabIndexUndefined, slabID.index) + + require.Equal(t, slabID.address[:], valueID[:slabAddressSize]) + require.Equal(t, slabID.index[:], valueID[slabAddressSize:]) +} diff --git a/value.go b/value.go index 06ce3a5c..3c6327fc 100644 --- a/value.go +++ b/value.go @@ -25,3 +25,12 @@ type Value interface { type ValueComparator func(SlabStorage, Value, Storable) (bool, error) type StorableComparator func(Storable, Storable) bool + +type parentUpdater func() error + +// valueNotifier is an interface that allows child value to notify and update parent. +type valueNotifier interface { + Value + ValueID() ValueID + setParentUpdater(parentUpdater) +}
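// Illustrative sketch, not in the patch itself: how a mutable child value is
// expected to use valueNotifier. After a mutation, the child invokes the
// parentUpdater callback so the parent container can refresh the storable it
// holds for this child (re-inlining it or spilling it to its own slab as
// needed). notifyingValue and mutate are hypothetical names; only Value,
// ValueID, parentUpdater, and valueNotifier come from the patch, and the
// Storable method is stubbed out.
type notifyingValue struct {
	id     ValueID
	notify parentUpdater // nil while this value has no parent container
}

var _ valueNotifier = &notifyingValue{}

func (v *notifyingValue) Storable(SlabStorage, Address, uint64) (Storable, error) {
	return nil, nil // a real implementation would encode the value here
}

func (v *notifyingValue) ValueID() ValueID { return v.id }

func (v *notifyingValue) setParentUpdater(f parentUpdater) { v.notify = f }

// mutate applies a state change, then notifies the parent container, if any.
func (v *notifyingValue) mutate() error {
	// ... modify state here ...
	if v.notify == nil {
		return nil // root value: no parent to notify
	}
	return v.notify()
}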