diff --git a/cmd/decode-state-values/main.go b/cmd/decode-state-values/main.go index 70ecd7e7b4..b6f781258f 100644 --- a/cmd/decode-state-values/main.go +++ b/cmd/decode-state-values/main.go @@ -234,7 +234,7 @@ type interpreterStorage struct { var _ interpreter.Storage = &interpreterStorage{} -func (i interpreterStorage) GetStorageMap(_ common.Address, _ string, _ bool) *interpreter.StorageMap { +func (i interpreterStorage) GetStorageMap(_ *interpreter.Interpreter, _ common.Address, _ string, _ bool) *interpreter.DomainStorageMap { panic("unexpected GetStorageMap call") } diff --git a/go.mod b/go.mod index 6193bce06e..ac296d2e54 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/kr/pretty v0.3.1 github.com/leanovate/gopter v0.2.9 github.com/logrusorgru/aurora/v4 v4.0.0 - github.com/onflow/atree v0.8.0 + github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf github.com/rivo/uniseg v0.4.4 github.com/schollz/progressbar/v3 v3.13.1 github.com/stretchr/testify v1.9.0 @@ -54,7 +54,7 @@ require ( github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/assert v1.3.0 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.26.0 // indirect diff --git a/go.sum b/go.sum index cc2c3058d0..417d758195 100644 --- a/go.sum +++ b/go.sum @@ -37,7 +37,6 @@ github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/k0kubun/pp/v3 v3.2.0 h1:h33hNTZ9nVFNP3u2Fsgz8JXiF5JINoZfFq4SvKJwNcs= github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapdz1EwA= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.0 h1:4ZexSFt8agMNzNisrsilL6RClWDC5YJnLHNIfTy4iuc= github.com/klauspost/cpuid/v2 v2.2.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/kodova/html-to-markdown v1.0.1 h1:MJxQAnqxtss3DaPnm72DRV65HZiMQZF3DUAfEaTg+14= @@ -75,8 +74,8 @@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2Em github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0= -github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf h1:MDB/hdwr5GMsZHIIrAw3gBnmWBy5XjsZ4/6kftv9d5c= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf/go.mod h1:U8PGG42VrSJqjdfJ9NGQ2fenkyFRYlgtfHsZM61H4zY= github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -108,11 +107,10 @@ github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d h1:5JInRQbk5UBX github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d/go.mod 
h1:Nlx5Y115XQvNcIdIy7dZXaNSUpzwBSge4/Ivk93/Yog= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= diff --git a/interpreter/account_storagemap.go b/interpreter/account_storagemap.go new file mode 100644 index 0000000000..67a51c1c35 --- /dev/null +++ b/interpreter/account_storagemap.go @@ -0,0 +1,341 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter + +import ( + goerrors "errors" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" +) + +// AccountStorageMap stores domain storage maps in an account. +type AccountStorageMap struct { + orderedMap *atree.OrderedMap +} + +// NewAccountStorageMap creates account storage map. +func NewAccountStorageMap( + memoryGauge common.MemoryGauge, + storage atree.SlabStorage, + address atree.Address, +) *AccountStorageMap { + common.UseMemory(memoryGauge, common.StorageMapMemoryUsage) + + orderedMap, err := atree.NewMap( + storage, + address, + atree.NewDefaultDigesterBuilder(), + emptyTypeInfo, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMap{ + orderedMap: orderedMap, + } +} + +// NewAccountStorageMapWithRootID loads existing account storage map with given atree SlabID. +func NewAccountStorageMapWithRootID( + storage atree.SlabStorage, + slabID atree.SlabID, +) *AccountStorageMap { + orderedMap, err := atree.NewMapWithRootID( + storage, + slabID, + atree.NewDefaultDigesterBuilder(), + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMap{ + orderedMap: orderedMap, + } +} + +// DomainExists returns true if the given domain exists in the account storage map. 
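+//
+// For example, a caller holding an *AccountStorageMap (here named accountStorageMap,
+// an assumed variable) could check for the path "storage" domain like this:
+//
+//	if accountStorageMap.DomainExists(common.PathDomainStorage.Identifier()) {
+//		// the account already has a domain storage map for the "storage" domain
+//	}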
+func (s *AccountStorageMap) DomainExists(domain string) bool { + key := StringStorageMapKey(domain) + + exists, err := s.orderedMap.Has( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return exists +} + +// GetDomain returns domain storage map for the given domain. +// If createIfNotExists is true and domain doesn't exist, new domain storage map +// is created and inserted into account storage map with given domain as key. +func (s *AccountStorageMap) GetDomain( + gauge common.MemoryGauge, + interpreter *Interpreter, + domain string, + createIfNotExists bool, +) *DomainStorageMap { + key := StringStorageMapKey(domain) + + storedValue, err := s.orderedMap.Get( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + var keyNotFoundError *atree.KeyNotFoundError + if goerrors.As(err, &keyNotFoundError) { + // Create domain storage map if needed. + + if createIfNotExists { + return s.NewDomain(gauge, interpreter, domain) + } + + return nil + } + + panic(errors.NewExternalError(err)) + } + + // Create domain storage map from raw atree value. + return NewDomainStorageMapWithAtreeValue(storedValue) +} + +// NewDomain creates new domain storage map and inserts it to AccountStorageMap with given domain as key. +func (s *AccountStorageMap) NewDomain( + gauge common.MemoryGauge, + interpreter *Interpreter, + domain string, +) *DomainStorageMap { + interpreter.recordStorageMutation() + + domainStorageMap := NewDomainStorageMap(gauge, s.orderedMap.Storage, s.orderedMap.Address()) + + key := StringStorageMapKey(domain) + + existingStorable, err := s.orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + domainStorageMap.orderedMap, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + if existingStorable != nil { + panic(errors.NewUnexpectedError( + "account %x domain %s should not exist", + s.orderedMap.Address(), domain, + )) + } + + return domainStorageMap +} + +// WriteDomain sets or removes domain storage map in account storage map. +// If the given storage map is nil, domain is removed. +// If the given storage map is non-nil, domain is added/updated. +// Returns true if domain storage map previously existed at the given domain. +func (s *AccountStorageMap) WriteDomain( + interpreter *Interpreter, + domain string, + storageMap *DomainStorageMap, +) (existed bool) { + if storageMap == nil { + return s.removeDomain(interpreter, domain) + } + return s.setDomain(interpreter, domain, storageMap) +} + +// setDomain sets domain storage map in the account storage map and returns true if domain previously existed. +// If the given domain already stores a domain storage map, it is overwritten. 
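+//
+// setDomain is reached through WriteDomain with a non-nil storage map; a rough sketch
+// of that path (gauge, inter, storage, and address are assumed to be in scope):
+//
+//	newDomainStorageMap := NewDomainStorageMap(gauge, storage, address)
+//	existed := accountStorageMap.WriteDomain(inter, common.PathDomainStorage.Identifier(), newDomainStorageMap)
+//	// existed reports whether a previous domain storage map was overwritten (and deep-removed)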
+func (s *AccountStorageMap) setDomain( + interpreter *Interpreter, + domain string, + storageMap *DomainStorageMap, +) (existed bool) { + interpreter.recordStorageMutation() + + key := StringStorageMapKey(domain) + + existingValueStorable, err := s.orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + storageMap.orderedMap, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + existed = existingValueStorable != nil + if existed { + // Create domain storage map from overwritten storable + domainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) + + // Deep remove elements in domain storage map + domainStorageMap.DeepRemove(interpreter, true) + + // Remove domain storage map slab + interpreter.RemoveReferencedSlab(existingValueStorable) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + + // NOTE: Don't call maybeValidateAtreeStorage() here because it is possible + // that domain storage map is in the process of being migrated to account + // storage map and state isn't consistent during migration. + + return +} + +// removeDomain removes domain storage map with given domain in account storage map, if it exists. +func (s *AccountStorageMap) removeDomain(interpreter *Interpreter, domain string) (existed bool) { + interpreter.recordStorageMutation() + + key := StringStorageMapKey(domain) + + existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + var keyNotFoundError *atree.KeyNotFoundError + if goerrors.As(err, &keyNotFoundError) { + // No-op to remove non-existent domain. + return + } + panic(errors.NewExternalError(err)) + } + + // Key + + // NOTE: Key is just an atree.Value (StringAtreeValue), not an interpreter.Value, + // so do not need (can) convert and not need to deep remove + interpreter.RemoveReferencedSlab(existingKeyStorable) + + // Value + + existed = existingValueStorable != nil + if existed { + // Create domain storage map from removed storable + domainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) + + // Deep remove elements in domain storage map + domainStorageMap.DeepRemove(interpreter, true) + + // Remove domain storage map slab + interpreter.RemoveReferencedSlab(existingValueStorable) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + + return +} + +func (s *AccountStorageMap) SlabID() atree.SlabID { + return s.orderedMap.SlabID() +} + +func (s *AccountStorageMap) Count() uint64 { + return s.orderedMap.Count() +} + +// Domains returns a set of domains in account storage map +func (s *AccountStorageMap) Domains() map[string]struct{} { + domains := make(map[string]struct{}) + + iterator := s.Iterator() + + for { + k, err := iterator.mapIterator.NextKey() + if err != nil { + panic(errors.NewExternalError(err)) + } + + if k == nil { + break + } + + domain := convertKeyToDomain(k) + domains[domain] = struct{}{} + } + + return domains +} + +// Iterator returns a mutable iterator (AccountStorageMapIterator), +// which allows iterating over the domain and domain storage map. 
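+//
+// A typical iteration (domain == "" signals the end, see Next below):
+//
+//	iterator := accountStorageMap.Iterator()
+//	for {
+//		domain, domainStorageMap := iterator.Next()
+//		if domain == "" {
+//			break
+//		}
+//		// use domain and domainStorageMap
+//	}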
+func (s *AccountStorageMap) Iterator() *AccountStorageMapIterator { + mapIterator, err := s.orderedMap.Iterator( + StorageMapKeyAtreeValueComparator, + StorageMapKeyAtreeValueHashInput, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMapIterator{ + mapIterator: mapIterator, + storage: s.orderedMap.Storage, + } +} + +// AccountStorageMapIterator is an iterator over AccountStorageMap. +type AccountStorageMapIterator struct { + mapIterator atree.MapIterator + storage atree.SlabStorage +} + +// Next returns the next domain and domain storage map. +// If there is no more domain, ("", nil) is returned. +func (i *AccountStorageMapIterator) Next() (string, *DomainStorageMap) { + k, v, err := i.mapIterator.Next() + if err != nil { + panic(errors.NewExternalError(err)) + } + + if k == nil || v == nil { + return "", nil + } + + key := convertKeyToDomain(k) + + value := NewDomainStorageMapWithAtreeValue(v) + + return key, value +} + +func convertKeyToDomain(v atree.Value) string { + key, ok := v.(StringAtreeValue) + if !ok { + panic(errors.NewUnexpectedError("domain key type %T isn't expected", key)) + } + return string(key) +} diff --git a/interpreter/account_storagemap_test.go b/interpreter/account_storagemap_test.go new file mode 100644 index 0000000000..cdc6fc868c --- /dev/null +++ b/interpreter/account_storagemap_test.go @@ -0,0 +1,766 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter_test + +import ( + "math/rand" + "slices" + "strconv" + "strings" + "testing" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" + + "github.com/stretchr/testify/require" +) + +func TestAccountStorageMapDomainExists(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + exist := accountStorageMap.DomainExists(domain) + require.False(t, exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
+ // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + // Check if domain exists + for _, domain := range runtime.AccountDomains { + exist := accountStorageMap.DomainExists(domain) + require.Equal(t, slices.Contains(existingDomains, domain), exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapGetDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = false + storagemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Nil(t, storagemap) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = false + domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Equal(t, slices.Contains(existingDomains, domain), domainStoragemap != nil) + + if domainStoragemap != nil { + checkDomainStorageMapData(t, inter, domainStoragemap, accountValues[domain]) + } + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapCreateDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = true + domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStoragemap) + require.Equal(t, uint64(0), domainStoragemap.Count()) + + accountValues[domain] = make(domainStorageMapValues) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = true + domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStoragemap) + require.Equal(t, uint64(len(accountValues[domain])), domainStoragemap.Count()) + + if !slices.Contains(existingDomains, domain) { + accountValues[domain] = make(domainStorageMapValues) + } + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + const count = 10 + for _, domain := range runtime.AccountDomains { + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap) + require.False(t, existed) + + accountValues[domain] = domainValues + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap) + require.Equal(t, slices.Contains(existingDomains, domain), existed) + + accountValues[domain] = domainValues + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapRemoveDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + existed := accountStorageMap.WriteDomain(inter, domain, nil) + require.False(t, existed) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + + existed := accountStorageMap.WriteDomain(inter, domain, nil) + require.Equal(t, slices.Contains(existingDomains, domain), existed) + + delete(accountValues, domain) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapIterator(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + iterator := accountStorageMap.Iterator() + + // Test calling Next() twice on empty account storage map. + for range 2 { + domain, domainStorageMap := iterator.Next() + require.Empty(t, domain) + require.Nil(t, domainStorageMap) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + } + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + iterator := accountStorageMap.Iterator() + + domainCount := 0 + for { + domain, domainStorageMap := iterator.Next() + if domain == "" { + break + } + + domainCount++ + + require.True(t, slices.Contains(existingDomains, domain)) + require.NotNil(t, domainStorageMap) + + checkDomainStorageMapData(t, inter, domainStorageMap, accountValues[domain]) + } + + // Test calling Next() after iterator reaches the end. + domain, domainStorageMap := iterator.Next() + require.True(t, domain == "") + require.Nil(t, domainStorageMap) + + require.Equal(t, len(existingDomains), domainCount) + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapDomains(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + domains := accountStorageMap.Domains() + require.Equal(t, 0, len(domains)) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + common.PathDomainPrivate.Identifier(), + } + + const count = 10 + accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + domains := accountStorageMap.Domains() + require.Equal(t, len(existingDomains), len(domains)) + + for _, domain := range existingDomains { + _, exist := domains[domain] + require.True(t, exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + return accountStorageMap.SlabID(), make(accountStorageMapValues), ledger.StoredValues, ledger.StorageIndices + } + + accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) + require.Equal(t, uint64(0), accountStorageMap.Count()) + require.Equal(t, accountStorageMapRootSlabID, accountStorageMap.SlabID()) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + common.PathDomainPrivate.Identifier(), + } + + init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + return accountStorageMap.SlabID(), accountValues, ledger.StoredValues, ledger.StorageIndices + } + + accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) + require.Equal(t, uint64(len(existingDomains)), accountStorageMap.Count()) + require.Equal(t, accountStorageMapRootSlabID, accountStorageMap.SlabID()) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +type ( + domainStorageMapValues map[interpreter.StorageMapKey]interpreter.Value + accountStorageMapValues map[string]domainStorageMapValues +) + +func createAccountStorageMap( + storage atree.SlabStorage, + inter *interpreter.Interpreter, + address common.Address, + domains []string, + count int, + random *rand.Rand, +) (*interpreter.AccountStorageMap, accountStorageMapValues) { + + // Create account storage map + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + // Create domain storage map + domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain) + + // Write to new domain storage map + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + accountValues[domain] = domainValues + } + + return accountStorageMap, accountValues +} + +func writeRandomValuesToDomainStorageMap( + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + count int, + random *rand.Rand, +) domainStorageMapValues { + + domainValues := make(domainStorageMapValues) + + for len(domainValues) < count { + n := random.Int() + + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + + var value interpreter.Value + + if len(domainValues) == 0 { + // First element is a large value that is stored in its own slabs. + value = interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000)) + } else { + value = interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + } + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + + return domainValues +} + +// checkAccountStorageMapData iterates account storage map and compares values with given expectedAccountValues. 
+func checkAccountStorageMapData( + tb testing.TB, + inter *interpreter.Interpreter, + accountStorageMap *interpreter.AccountStorageMap, + expectedAccountValues accountStorageMapValues, +) { + require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count()) + + domainCount := 0 + iter := accountStorageMap.Iterator() + for { + domain, domainStorageMap := iter.Next() + if domain == "" { + break + } + + domainCount++ + + expectedDomainValues, exist := expectedAccountValues[domain] + require.True(tb, exist) + + checkDomainStorageMapData(tb, inter, domainStorageMap, expectedDomainValues) + } + + require.Equal(tb, len(expectedAccountValues), domainCount) +} + +// checkDomainStorageMapData iterates domain storage map and compares values with given expectedDomainValues. +func checkDomainStorageMapData( + tb testing.TB, + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + expectedDomainValues domainStorageMapValues, +) { + require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + count := 0 + iter := domainStorageMap.Iterator(nil) + for { + k, v := iter.Next() + if k == nil { + break + } + + count++ + + kv := k.(interpreter.StringAtreeValue) + + expectedValue := expectedDomainValues[interpreter.StringStorageMapKey(kv)] + + checkCadenceValue(tb, inter, v, expectedValue) + } + + require.Equal(tb, len(expectedDomainValues), count) +} + +func checkCadenceValue( + tb testing.TB, + inter *interpreter.Interpreter, + value, + expectedValue interpreter.Value, +) { + ev, ok := value.(interpreter.EquatableValue) + require.True(tb, ok) + require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) +} diff --git a/interpreter/storagemap.go b/interpreter/domain_storagemap.go similarity index 53% rename from interpreter/storagemap.go rename to interpreter/domain_storagemap.go index 54c9e2acbb..336828ce64 100644 --- a/interpreter/storagemap.go +++ b/interpreter/domain_storagemap.go @@ -20,6 +20,7 @@ package interpreter import ( goerrors "errors" + "time" "github.com/onflow/atree" @@ -27,12 +28,13 @@ import ( "github.com/onflow/cadence/errors" ) -// StorageMap is an ordered map which stores values in an account. -type StorageMap struct { +// DomainStorageMap is an ordered map which stores values in an account domain. +type DomainStorageMap struct { orderedMap *atree.OrderedMap } -func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *StorageMap { +// NewDomainStorageMap creates new domain storage map for given address. +func NewDomainStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *DomainStorageMap { common.UseMemory(memoryGauge, common.StorageMapMemoryUsage) orderedMap, err := atree.NewMap( @@ -45,12 +47,16 @@ func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, ad panic(errors.NewExternalError(err)) } - return &StorageMap{ + return &DomainStorageMap{ orderedMap: orderedMap, } } -func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *StorageMap { +// NewDomainStorageMapWithRootID loads domain storage map with given slabID. +// This function is only used with legacy domain registers for unmigrated accounts. +// For migrated accounts, NewDomainStorageMapWithAtreeValue() is used to load +// domain storage map as an element of AccountStorageMap. 
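+//
+// A rough sketch of the legacy load path (slabID is assumed to have been read from
+// a legacy domain register):
+//
+//	domainStorageMap := NewDomainStorageMapWithRootID(storage, slabID)
+//
+// For migrated accounts, the equivalent map is obtained via AccountStorageMap.GetDomain,
+// which loads it with NewDomainStorageMapWithAtreeValue.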
+func NewDomainStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *DomainStorageMap { orderedMap, err := atree.NewMapWithRootID( storage, slabID, @@ -60,13 +66,51 @@ func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *St panic(errors.NewExternalError(err)) } - return &StorageMap{ + return &DomainStorageMap{ orderedMap: orderedMap, } } +// newDomainStorageMapWithAtreeStorable loads domain storage map with given atree.Storable. +func newDomainStorageMapWithAtreeStorable(storage atree.SlabStorage, storable atree.Storable) *DomainStorageMap { + + // NOTE: Don't use interpreter.StoredValue() to convert given storable + // to DomainStorageMap because DomainStorageMap isn't interpreter.Value. + + value, err := storable.StoredValue(storage) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return NewDomainStorageMapWithAtreeValue(value) +} + +// NewDomainStorageMapWithAtreeValue loads domain storage map with given atree.Value. +// This function is used by migrated account to load domain as an element of AccountStorageMap. +func NewDomainStorageMapWithAtreeValue(value atree.Value) *DomainStorageMap { + // Check if type of given value is *atree.OrderedMap + dm, isAtreeOrderedMap := value.(*atree.OrderedMap) + if !isAtreeOrderedMap { + panic(errors.NewUnexpectedError( + "domain storage map has unexpected type %T, expect *atree.OrderedMap", + value, + )) + } + + // Check if TypeInfo of atree.OrderedMap is EmptyTypeInfo + dt, isEmptyTypeInfo := dm.Type().(EmptyTypeInfo) + if !isEmptyTypeInfo { + panic(errors.NewUnexpectedError( + "domain storage map has unexpected encoded type %T, expect EmptyTypeInfo", + dt, + )) + } + + return &DomainStorageMap{orderedMap: dm} +} + // ValueExists returns true if the given key exists in the storage map. -func (s StorageMap) ValueExists(key StorageMapKey) bool { +func (s DomainStorageMap) ValueExists(key StorageMapKey) bool { exists, err := s.orderedMap.Has( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -81,7 +125,7 @@ func (s StorageMap) ValueExists(key StorageMapKey) bool { // ReadValue returns the value for the given key. // Returns nil if the key does not exist. -func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { +func (s DomainStorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { storedValue, err := s.orderedMap.Get( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -102,7 +146,7 @@ func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value // If the given value is nil, the key is removed. // If the given value is non-nil, the key is added/updated. // Returns true if a value previously existed at the given key. -func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +func (s DomainStorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { if value == nil { return s.RemoveValue(interpreter, key) } else { @@ -112,8 +156,8 @@ func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, valu // SetValue sets a value in the storage map. // If the given key already stores a value, it is overwritten. -// Returns true if -func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +// Returns true if given key already exists and existing value is overwritten. 
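+//
+// For example (inter and domainStorageMap are assumed to be in scope; WriteValue
+// dispatches here for non-nil values):
+//
+//	key := StringStorageMapKey("counter")
+//	existed := domainStorageMap.WriteValue(inter, key, NewUnmeteredIntValueFromInt64(1))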
+func (s DomainStorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { interpreter.recordStorageMutation() existingStorable, err := s.orderedMap.Set( @@ -126,20 +170,21 @@ func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value panic(errors.NewExternalError(err)) } - interpreter.maybeValidateAtreeValue(s.orderedMap) - interpreter.maybeValidateAtreeStorage() - existed = existingStorable != nil if existed { existingValue := StoredValue(interpreter, existingStorable, interpreter.Storage()) existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was overwritten in parent container. interpreter.RemoveReferencedSlab(existingStorable) } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + return } // RemoveValue removes a value in the storage map, if it exists. -func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { +func (s DomainStorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { interpreter.recordStorageMutation() existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( @@ -155,9 +200,6 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex panic(errors.NewExternalError(err)) } - interpreter.maybeValidateAtreeValue(s.orderedMap) - interpreter.maybeValidateAtreeStorage() - // Key // NOTE: Key is just an atree.Value, not an interpreter.Value, @@ -172,12 +214,78 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was removed from parent container. interpreter.RemoveReferencedSlab(existingValueStorable) } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + return } +// DeepRemove removes all elements (and their slabs) of domain storage map. +func (s *DomainStorageMap) DeepRemove(interpreter *Interpreter, hasNoParentContainer bool) { + + config := interpreter.SharedState.Config + + if config.TracingEnabled { + startTime := time.Now() + + typeInfo := "DomainStorageMap" + count := s.Count() + + defer func() { + interpreter.reportDomainStorageMapDeepRemoveTrace( + typeInfo, + int(count), + time.Since(startTime), + ) + }() + } + + // Remove nested values and storables + + // Remove keys and values + + storage := s.orderedMap.Storage + + err := s.orderedMap.PopIterate(func(keyStorable atree.Storable, valueStorable atree.Storable) { + // Key + + // NOTE: Key is just an atree.Value, not an interpreter.Value, + // so do not need (can) convert and not need to deep remove + interpreter.RemoveReferencedSlab(keyStorable) + + // Value + + value := StoredValue(interpreter, valueStorable, storage) + value.DeepRemove(interpreter, false) // value is an element of v.dictionary because it is from PopIterate() callback. + interpreter.RemoveReferencedSlab(valueStorable) + }) + if err != nil { + panic(errors.NewExternalError(err)) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + if hasNoParentContainer { + interpreter.maybeValidateAtreeStorage() + } +} + +func (s DomainStorageMap) ValueID() atree.ValueID { + return s.orderedMap.ValueID() +} + +func (s DomainStorageMap) Count() uint64 { + return s.orderedMap.Count() +} + +func (s DomainStorageMap) Inlined() bool { + // This is only used for testing currently. 
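+	// An inlined domain storage map is one whose atree map is encoded inside its
+	// parent (account storage map) slab instead of in its own root slab.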
+ return s.orderedMap.Inlined() +} + // Iterator returns an iterator (StorageMapIterator), // which allows iterating over the keys and values of the storage map -func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator { +func (s DomainStorageMap) Iterator(gauge common.MemoryGauge) DomainStorageMapIterator { mapIterator, err := s.orderedMap.Iterator( StorageMapKeyAtreeValueComparator, StorageMapKeyAtreeValueHashInput, @@ -186,31 +294,23 @@ func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator { panic(errors.NewExternalError(err)) } - return StorageMapIterator{ + return DomainStorageMapIterator{ gauge: gauge, mapIterator: mapIterator, storage: s.orderedMap.Storage, } } -func (s StorageMap) SlabID() atree.SlabID { - return s.orderedMap.SlabID() -} - -func (s StorageMap) Count() uint64 { - return s.orderedMap.Count() -} - -// StorageMapIterator is an iterator over StorageMap -type StorageMapIterator struct { +// DomainStorageMapIterator is an iterator over DomainStorageMap +type DomainStorageMapIterator struct { gauge common.MemoryGauge mapIterator atree.MapIterator storage atree.SlabStorage } // Next returns the next key and value of the storage map iterator. -// If there is no further key-value pair, ("", nil) is returned. -func (i StorageMapIterator) Next() (atree.Value, Value) { +// If there is no further key-value pair, (nil, nil) is returned. +func (i DomainStorageMapIterator) Next() (atree.Value, Value) { k, v, err := i.mapIterator.Next() if err != nil { panic(errors.NewExternalError(err)) @@ -230,7 +330,7 @@ func (i StorageMapIterator) Next() (atree.Value, Value) { // NextKey returns the next key of the storage map iterator. // If there is no further key, "" is returned. -func (i StorageMapIterator) NextKey() atree.Value { +func (i DomainStorageMapIterator) NextKey() atree.Value { k, err := i.mapIterator.NextKey() if err != nil { panic(errors.NewExternalError(err)) @@ -240,8 +340,8 @@ func (i StorageMapIterator) NextKey() atree.Value { } // NextValue returns the next value in the storage map iterator. -// If there is nop further value, nil is returned. -func (i StorageMapIterator) NextValue() Value { +// If there is no further value, nil is returned. +func (i DomainStorageMapIterator) NextValue() Value { v, err := i.mapIterator.NextValue() if err != nil { panic(errors.NewExternalError(err)) diff --git a/interpreter/domain_storagemap_test.go b/interpreter/domain_storagemap_test.go new file mode 100644 index 0000000000..9b1985dce1 --- /dev/null +++ b/interpreter/domain_storagemap_test.go @@ -0,0 +1,711 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter_test + +import ( + "math/rand" + "strconv" + "testing" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . 
"github.com/onflow/cadence/test_utils/interpreter_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" + + "github.com/stretchr/testify/require" +) + +func TestDomainStorageMapValueExists(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + exist := domainStorageMap.ValueExists(interpreter.StringStorageMapKey(key)) + require.False(t, exist) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match DomainStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + // Check if value exists + for key := range domainValues { + exist := domainStorageMap.ValueExists(key) + require.True(t, exist) + } + + // Check if random value exists + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + _, keyExist := domainValues[key] + + exist := domainStorageMap.ValueExists(key) + require.Equal(t, keyExist, exist) + } + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapReadValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + v := domainStorageMap.ReadValue(nil, interpreter.StringStorageMapKey(key)) + require.Nil(t, v) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match DomainStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key, expectedValue := range domainValues { + value := domainStorageMap.ReadValue(nil, key) + require.NotNil(t, value) + + checkCadenceValue(t, inter, value, expectedValue) + } + + // Get non-existent value + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, keyExist := domainValues[key]; keyExist { + continue + } + + value := domainStorageMap.ReadValue(nil, key) + require.Nil(t, value) + } + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapSetAndUpdateValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + const count = 10 + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key := range domainValues { + // Overwrite existing values + n := random.Int() + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + require.Equal(t, uint64(count), domainStorageMap.Count()) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapRemoveValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + existed := domainStorageMap.WriteValue(inter, interpreter.StringStorageMapKey(key), nil) + require.False(t, existed) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
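+		// NOTE: as in the empty case above, removal is expressed as WriteValue with a nil value;
+		// the returned boolean reports whether a value previously existed for the key.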
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + } + + // Remove non-existent value + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, keyExist := domainValues[key]; keyExist { + continue + } + + existed := domainStorageMap.WriteValue(inter, key, nil) + require.False(t, existed) + } + + clear(domainValues) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNext(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling Next() twice on empty account storage map. + for range 2 { + k, v := iterator.Next() + require.Nil(t, k) + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + k, v := iterator.Next() + if k == nil { + break + } + + elementCount++ + + kv := k.(interpreter.StringAtreeValue) + + expectedValue, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)] + require.True(t, expectedValueExist) + + checkCadenceValue(t, inter, v, expectedValue) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling Next() after iterator reaches the end. + for range 2 { + k, v := iterator.Next() + require.Nil(t, k) + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNextKey(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling NextKey() twice on empty account storage map. + for range 2 { + k := iterator.NextKey() + require.Nil(t, k) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + k := iterator.NextKey() + if k == nil { + break + } + + elementCount++ + + kv := k.(interpreter.StringAtreeValue) + + _, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)] + require.True(t, expectedValueExist) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling NextKey() after iterator reaches the end. + for range 2 { + k := iterator.NextKey() + require.Nil(t, k) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNextValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling NextValue() twice on empty account storage map. + for range 2 { + v := iterator.NextValue() + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab.
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + v := iterator.NextValue() + if v == nil { + break + } + + elementCount++ + + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + + match := false + for _, expectedValue := range domainValues { + if ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue) { + match = true + break + } + } + require.True(t, match) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling NextValue() after iterator reaches the end. + for range 2 { + v := iterator.NextValue() + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + valueID := domainStorageMap.ValueID() + return atreeValueIDToSlabID(valueID), make(domainStorageMapValues), ledger.StoredValues, ledger.StorageIndices + } + + domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID}) + }) + + t.Run("non-empty", func(t *testing.T) { + + init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
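+			// The explicit health check below uses the domain storage map's root slab ID,
+			// which atreeValueIDToSlabID derives from the ValueID: the first 8 bytes are the
+			// owner address and the remaining 8 bytes are the slab index.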
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + valueID := domainStorageMap.ValueID() + return atreeValueIDToSlabID(valueID), domainValues, ledger.StoredValues, ledger.StorageIndices + } + + domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID}) + }) +} + +func createDomainStorageMap( + storage atree.SlabStorage, + inter *interpreter.Interpreter, + address common.Address, + count int, + random *rand.Rand, +) (*interpreter.DomainStorageMap, domainStorageMapValues) { + + // Create domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + + // Write to new domain storage map + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + return domainStorageMap, domainValues +} + +func atreeValueIDToSlabID(vid atree.ValueID) atree.SlabID { + return atree.NewSlabID( + atree.Address(vid[:8]), + atree.SlabIndex(vid[8:]), + ) +} diff --git a/interpreter/interpreter.go b/interpreter/interpreter.go index a163efb01f..9fdbf1d135 100644 --- a/interpreter/interpreter.go +++ b/interpreter/interpreter.go @@ -237,7 +237,7 @@ func (c TypeCodes) Merge(codes TypeCodes) { type Storage interface { atree.SlabStorage - GetStorageMap(address common.Address, domain string, createIfNotExists bool) *StorageMap + GetStorageMap(inter *Interpreter, address common.Address, domain string, createIfNotExists bool) *DomainStorageMap CheckHealth() error } @@ -2681,7 +2681,7 @@ func (interpreter *Interpreter) StoredValueExists( domain string, identifier StorageMapKey, ) bool { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false) + accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return false } @@ -2693,7 +2693,7 @@ func (interpreter *Interpreter) ReadStored( domain string, identifier StorageMapKey, ) Value { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false) + accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return nil } @@ -2706,7 +2706,7 @@ func (interpreter *Interpreter) WriteStored( key StorageMapKey, value Value, ) (existed bool) { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, true) + accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, true) return accountStorage.WriteValue(interpreter, key, value) } @@ -4069,7 +4069,7 @@ func (interpreter *Interpreter) IsSubTypeOfSemaType(staticSubType StaticType, su } func (interpreter *Interpreter) domainPaths(address common.Address, domain common.PathDomain) []Value { - storageMap := 
interpreter.Storage().GetStorageMap(address, domain.Identifier(), false) + storageMap := interpreter.Storage().GetStorageMap(interpreter, address, domain.Identifier(), false) if storageMap == nil { return []Value{} } @@ -4164,7 +4164,7 @@ func (interpreter *Interpreter) newStorageIterationFunction( parameterTypes := fnType.ParameterTypes() returnType := fnType.ReturnTypeAnnotation.Type - storageMap := config.Storage.GetStorageMap(address, domain.Identifier(), false) + storageMap := config.Storage.GetStorageMap(interpreter, address, domain.Identifier(), false) if storageMap == nil { // if nothing is stored, no iteration is required return Void diff --git a/interpreter/interpreter_tracing.go b/interpreter/interpreter_tracing.go index 365eebed42..10ee3dd418 100644 --- a/interpreter/interpreter_tracing.go +++ b/interpreter/interpreter_tracing.go @@ -30,9 +30,10 @@ const ( tracingImportPrefix = "import." // type prefixes - tracingArrayPrefix = "array." - tracingDictionaryPrefix = "dictionary." - tracingCompositePrefix = "composite." + tracingArrayPrefix = "array." + tracingDictionaryPrefix = "dictionary." + tracingCompositePrefix = "composite." + tracingDomainStorageMapPrefix = "domainstoragemap." // Value operation postfixes tracingConstructPostfix = "construct" @@ -162,6 +163,20 @@ func (interpreter *Interpreter) reportDictionaryValueDeepRemoveTrace( ) } +func (interpreter *Interpreter) reportDomainStorageMapDeepRemoveTrace( + typeInfo string, + count int, + duration time.Duration, +) { + config := interpreter.SharedState.Config + config.OnRecordTrace( + interpreter, + tracingDomainStorageMapPrefix+tracingDeepRemovePostfix, + duration, + prepareArrayAndMapValueTraceAttrs(typeInfo, count), + ) +} + func (interpreter *Interpreter) reportDictionaryValueDestroyTrace( typeInfo string, count int, diff --git a/interpreter/misc_test.go b/interpreter/misc_test.go index 8ec9d08532..e00d00f525 100644 --- a/interpreter/misc_test.go +++ b/interpreter/misc_test.go @@ -5350,7 +5350,7 @@ func TestInterpretReferenceFailableDowncasting(t *testing.T) { ) domain := storagePath.Domain.Identifier() - storageMap := storage.GetStorageMap(storageAddress, domain, true) + storageMap := storage.GetStorageMap(inter, storageAddress, domain, true) storageMapKey := interpreter.StringStorageMapKey(storagePath.Identifier) storageMap.WriteValue(inter, storageMapKey, r) diff --git a/interpreter/storage.go b/interpreter/storage.go index c5bdcb0b87..992a039e69 100644 --- a/interpreter/storage.go +++ b/interpreter/storage.go @@ -130,7 +130,7 @@ func (k StorageKey) IsLess(o StorageKey) bool { // InMemoryStorage type InMemoryStorage struct { *atree.BasicSlabStorage - StorageMaps map[StorageKey]*StorageMap + StorageMaps map[StorageKey]*DomainStorageMap memoryGauge common.MemoryGauge } @@ -158,22 +158,23 @@ func NewInMemoryStorage(memoryGauge common.MemoryGauge) InMemoryStorage { return InMemoryStorage{ BasicSlabStorage: slabStorage, - StorageMaps: make(map[StorageKey]*StorageMap), + StorageMaps: make(map[StorageKey]*DomainStorageMap), memoryGauge: memoryGauge, } } func (i InMemoryStorage) GetStorageMap( + _ *Interpreter, address common.Address, domain string, createIfNotExists bool, ) ( - storageMap *StorageMap, + storageMap *DomainStorageMap, ) { key := NewStorageKey(i.memoryGauge, address, domain) storageMap = i.StorageMaps[key] if storageMap == nil && createIfNotExists { - storageMap = NewStorageMap(i.memoryGauge, i, atree.Address(address)) + storageMap = NewDomainStorageMap(i.memoryGauge, i, atree.Address(address)) 
i.StorageMaps[key] = storageMap } return storageMap diff --git a/interpreter/storage_test.go b/interpreter/storage_test.go index 0693df2d9b..16fd679a19 100644 --- a/interpreter/storage_test.go +++ b/interpreter/storage_test.go @@ -524,7 +524,7 @@ func TestStorageOverwriteAndRemove(t *testing.T) { const storageMapKey = StringStorageMapKey("test") - storageMap := storage.GetStorageMap(address, "storage", true) + storageMap := storage.GetStorageMap(inter, address, "storage", true) storageMap.WriteValue(inter, storageMapKey, array1) // Overwriting delete any existing child slabs diff --git a/interpreter/stringatreevalue_test.go b/interpreter/stringatreevalue_test.go index f2e622a8a9..366b59b1ee 100644 --- a/interpreter/stringatreevalue_test.go +++ b/interpreter/stringatreevalue_test.go @@ -36,12 +36,6 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) { storage := NewInMemoryStorage(nil) - storageMap := storage.GetStorageMap( - common.MustBytesToAddress([]byte{0x1}), - common.PathDomainStorage.Identifier(), - true, - ) - inter, err := NewInterpreter( nil, common.StringLocation("test"), @@ -51,6 +45,13 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) { ) require.NoError(t, err) + storageMap := storage.GetStorageMap( + inter, + common.MustBytesToAddress([]byte{0x1}), + common.PathDomainStorage.Identifier(), + true, + ) + // Generate a large key to force the string to get stored in a separate slab keyValue := NewStringAtreeValue(nil, strings.Repeat("x", 10_000)) diff --git a/interpreter/value_test.go b/interpreter/value_test.go index 295f5a4346..26cfac8621 100644 --- a/interpreter/value_test.go +++ b/interpreter/value_test.go @@ -3806,7 +3806,7 @@ func TestValue_ConformsToStaticType(t *testing.T) { ) require.NoError(t, err) - storageMap := storage.GetStorageMap(testAddress, "storage", true) + storageMap := storage.GetStorageMap(inter, testAddress, "storage", true) storageMap.WriteValue(inter, StringStorageMapKey("test"), TrueValue) value := valueFactory(inter) diff --git a/runtime/capabilitycontrollers_test.go b/runtime/capabilitycontrollers_test.go index fc6f692fe4..d00fb18dae 100644 --- a/runtime/capabilitycontrollers_test.go +++ b/runtime/capabilitycontrollers_test.go @@ -3251,7 +3251,10 @@ func TestRuntimeCapabilityControllers(t *testing.T) { ) require.NoError(t, err) + // Use *interpreter.Interpreter(nil) here because createIfNotExists is false. + storageMap := storage.GetStorageMap( + nil, common.MustBytesToAddress([]byte{0x1}), stdlib.PathCapabilityStorageDomain, false, @@ -3841,6 +3844,7 @@ func TestRuntimeCapabilitiesGetBackwardCompatibility(t *testing.T) { require.NoError(t, err) publicStorageMap := storage.GetStorageMap( + inter, testAddress, common.PathDomainPublic.Identifier(), true, @@ -3948,6 +3952,7 @@ func TestRuntimeCapabilitiesPublishBackwardCompatibility(t *testing.T) { require.NoError(t, err) publicStorageMap := storage.GetStorageMap( + inter, testAddress, common.PathDomainStorage.Identifier(), true, @@ -4038,6 +4043,7 @@ func TestRuntimeCapabilitiesUnpublishBackwardCompatibility(t *testing.T) { require.NoError(t, err) publicStorageMap := storage.GetStorageMap( + inter, testAddress, common.PathDomainPublic.Identifier(), true, diff --git a/runtime/contract_test.go b/runtime/contract_test.go index 2bb89a2cb5..a009906aa0 100644 --- a/runtime/contract_test.go +++ b/runtime/contract_test.go @@ -223,7 +223,7 @@ func TestRuntimeContract(t *testing.T) { getContractValueExists := func() bool { storageMap := NewStorage(storage, nil). 
- GetStorageMap(signerAddress, StorageDomainContract, false) + GetStorageMap(inter, signerAddress, StorageDomainContract, false) if storageMap == nil { return false } diff --git a/runtime/environment.go b/runtime/environment.go index 7873d4e990..1806f18698 100644 --- a/runtime/environment.go +++ b/runtime/environment.go @@ -1107,6 +1107,7 @@ func (e *interpreterEnvironment) loadContract( location := compositeType.Location if addressLocation, ok := location.(common.AddressLocation); ok { storageMap := e.storage.GetStorageMap( + inter, addressLocation.Address, StorageDomainContract, false, diff --git a/runtime/ft_test.go b/runtime/ft_test.go index b9374d986e..3e37164cf7 100644 --- a/runtime/ft_test.go +++ b/runtime/ft_test.go @@ -1084,6 +1084,7 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { ) contractStorage := storage.GetStorageMap( + inter, contractsAddress, StorageDomainContract, true, @@ -1119,6 +1120,7 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { ) userStorage := storage.GetStorageMap( + inter, userAddress, common.PathDomainStorage.Identifier(), true, diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go new file mode 100644 index 0000000000..2298e99e38 --- /dev/null +++ b/runtime/migrate_domain_registers.go @@ -0,0 +1,202 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/common/orderedmap" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" +) + +type AccountStorageMaps = *orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap] + +type GetDomainStorageMapFunc func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain string, +) (*interpreter.DomainStorageMap, error) + +type DomainRegisterMigration struct { + ledger atree.Ledger + storage atree.SlabStorage + inter *interpreter.Interpreter + memoryGauge common.MemoryGauge + getDomainStorageMap GetDomainStorageMapFunc +} + +func NewDomainRegisterMigration( + ledger atree.Ledger, + storage atree.SlabStorage, + inter *interpreter.Interpreter, + memoryGauge common.MemoryGauge, +) *DomainRegisterMigration { + return &DomainRegisterMigration{ + ledger: ledger, + storage: storage, + inter: inter, + memoryGauge: memoryGauge, + getDomainStorageMap: getDomainStorageMapFromLegacyDomainRegister, + } +} + +// SetGetDomainStorageMapFunc allows user to provide custom GetDomainStorageMap function. +func (m *DomainRegisterMigration) SetGetDomainStorageMapFunc( + getDomainStorageMapFunc GetDomainStorageMapFunc, +) { + m.getDomainStorageMap = getDomainStorageMapFunc +} + +// MigrateAccounts migrates given accounts. 
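+// Accounts are visited in the given order, and only addresses accepted by pred are migrated.
+// An illustrative driver, mirroring the usage in the migration tests, might look like:
+//
+//	migration := NewDomainRegisterMigration(ledger, storage, inter, nil)
+//	migrated, err := migration.MigrateAccounts(accounts, func(common.Address) bool { return true })
+//
+// where accounts is an ordered set of candidate addresses.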
+func (m *DomainRegisterMigration) MigrateAccounts( + accounts *orderedmap.OrderedMap[common.Address, struct{}], + pred func(common.Address) bool, +) ( + AccountStorageMaps, + error, +) { + if accounts == nil || accounts.Len() == 0 { + return nil, nil + } + + var migratedAccounts AccountStorageMaps + + for pair := accounts.Oldest(); pair != nil; pair = pair.Next() { + address := pair.Key + + if !pred(address) { + continue + } + + migrated, err := isMigrated(m.ledger, address) + if err != nil { + return nil, err + } + if migrated { + continue + } + + accountStorageMap, err := m.MigrateAccount(address) + if err != nil { + return nil, err + } + + if accountStorageMap == nil { + continue + } + + if migratedAccounts == nil { + migratedAccounts = &orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap]{} + } + migratedAccounts.Set(address, accountStorageMap) + } + + return migratedAccounts, nil +} + +func (m *DomainRegisterMigration) MigrateAccount( + address common.Address, +) (*interpreter.AccountStorageMap, error) { + + // Migrate existing domains + accountStorageMap, err := m.migrateDomains(address) + if err != nil { + return nil, err + } + + if accountStorageMap == nil { + // Nothing migrated + return nil, nil + } + + accountStorageMapSlabIndex := accountStorageMap.SlabID().Index() + + // Write account register + errors.WrapPanic(func() { + err = m.ledger.SetValue( + address[:], + []byte(AccountStorageKey), + accountStorageMapSlabIndex[:], + ) + }) + if err != nil { + return nil, interpreter.WrappedExternalError(err) + } + + return accountStorageMap, nil +} + +// migrateDomains migrates existing domain storage maps and removes domain registers. +func (m *DomainRegisterMigration) migrateDomains( + address common.Address, +) (*interpreter.AccountStorageMap, error) { + + var accountStorageMap *interpreter.AccountStorageMap + + for _, domain := range AccountDomains { + + domainStorageMap, err := m.getDomainStorageMap(m.ledger, m.storage, address, domain) + if err != nil { + return nil, err + } + + if domainStorageMap == nil { + // Skip non-existent domain + continue + } + + if accountStorageMap == nil { + accountStorageMap = interpreter.NewAccountStorageMap(m.memoryGauge, m.storage, atree.Address(address)) + } + + // Migrate (insert) existing domain storage map to account storage map + existed := accountStorageMap.WriteDomain(m.inter, domain, domainStorageMap) + if existed { + // This shouldn't happen because we are inserting domain storage map into empty account storage map. + return nil, errors.NewUnexpectedError( + "failed to migrate domain %s for account %x: domain already exists in account storage map", domain, address, + ) + } + + // Remove migrated domain registers + errors.WrapPanic(func() { + // NOTE: removing non-existent domain registers is no-op. 
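+			// Writing a nil value removes the domain register from the ledger.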
+ err = m.ledger.SetValue( + address[:], + []byte(domain), + nil) + }) + if err != nil { + return nil, interpreter.WrappedExternalError(err) + } + } + + return accountStorageMap, nil +} + +func isMigrated(ledger atree.Ledger, address common.Address) (bool, error) { + _, registerExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(AccountStorageKey)) + if err != nil { + return false, err + } + return registerExists, nil +} diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go new file mode 100644 index 0000000000..28769c9e14 --- /dev/null +++ b/runtime/migrate_domain_registers_test.go @@ -0,0 +1,579 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime_test + +import ( + "math" + "math/rand" + goruntime "runtime" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/common/orderedmap" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . 
"github.com/onflow/cadence/test_utils/runtime_utils" +) + +func TestMigrateDomainRegisters(t *testing.T) { + + alwaysMigrate := func(common.Address) bool { + return true + } + + neverMigrate := func(common.Address) bool { + return false + } + + migrateSpecificAccount := func(addressToMigrate common.Address) func(common.Address) bool { + return func(address common.Address) bool { + return address == addressToMigrate + } + } + + isAtreeRegister := func(key string) bool { + return key[0] == '$' && len(key) == 9 + } + + getNonAtreeRegisters := func(values map[string][]byte) map[string][]byte { + nonAtreeRegisters := make(map[string][]byte) + for k, v := range values { + ks := strings.Split(k, "|") + if !isAtreeRegister(ks[1]) && len(v) > 0 { + nonAtreeRegisters[k] = v + } + } + return nonAtreeRegisters + } + + address1 := common.MustBytesToAddress([]byte{0x1}) + address2 := common.MustBytesToAddress([]byte{0x2}) + + t.Run("no accounts", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + migratedAccounts, err := migrator.MigrateAccounts(nil, alwaysMigrate) + require.NoError(t, err) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + + err = storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, 0, len(ledger.StoredValues)) + }) + + t.Run("accounts without domain registers", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) + require.NoError(t, err) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + + err = storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, 0, len(ledger.StoredValues)) + }) + + t.Run("accounts with domain registers", func(t *testing.T) { + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + {domain: common.PathDomainPrivate.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) + require.NoError(t, err) + require.NotNil(t, migratedAccounts) + require.Equal(t, accounts.Len(), migratedAccounts.Len()) + require.Equal(t, address2, migratedAccounts.Oldest().Key) + require.Equal(t, address1, migratedAccounts.Newest().Key) + + err = storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + // Check non-atree 
registers + nonAtreeRegisters := getNonAtreeRegisters(ledger.StoredValues) + require.Equal(t, accounts.Len(), len(nonAtreeRegisters)) + require.Contains(t, nonAtreeRegisters, string(address1[:])+"|"+runtime.AccountStorageKey) + require.Contains(t, nonAtreeRegisters, string(address2[:])+"|"+runtime.AccountStorageKey) + + // Check atree storage + expectedRootSlabIDs := make([]atree.SlabID, 0, migratedAccounts.Len()) + for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { + accountStorageMap := pair.Value + expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) + } + + CheckAtreeStorageHealth(t, storage, expectedRootSlabIDs) + + // Check account storage map data + for address, accountValues := range accountsValues { + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) + + t.Run("migrated accounts", func(t *testing.T) { + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + {domain: common.PathDomainPrivate.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithMigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) + require.NoError(t, err) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + + // Check account storage map data + for address, accountValues := range accountsValues { + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) + + t.Run("never migration predicate", func(t *testing.T) { + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, _ := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, neverMigrate) + require.NoError(t, err) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + }) + + t.Run("selective migration predicate", func(t *testing.T) { + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, _ := 
newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, migrateSpecificAccount(address2)) + require.NoError(t, err) + require.NotNil(t, migratedAccounts) + require.Equal(t, 1, migratedAccounts.Len()) + require.Equal(t, address2, migratedAccounts.Oldest().Key) + }) +} + +type domainInfo struct { + domain string + domainStorageMapCount int + maxDepth int +} + +type accountInfo struct { + address common.Address + domains []domainInfo +} + +func newTestLedgerWithUnmigratedAccounts( + tb testing.TB, + onRead LedgerOnRead, + onWrite LedgerOnWrite, + accounts []accountInfo, +) (TestLedger, map[common.Address]accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // domain register to match DomainStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + tb, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + random := rand.New(rand.NewSource(42)) + + accountsValues := make(map[common.Address]accountStorageMapValues) + + var expectedDomainRootSlabIDs []atree.SlabID + + for _, account := range accounts { + + address := account.address + + accountValues := make(accountStorageMapValues) + + accountsValues[address] = accountValues + + for _, domainInfo := range account.domains { + + domain := domainInfo.domain + domainStorageMapCount := domainInfo.domainStorageMapCount + maxDepth := domainInfo.maxDepth + + accountValues[domain] = make(domainStorageMapValues) + + // Create domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + + // Write domain register + domainStorageMapValueID := domainStorageMap.ValueID() + err := ledger.SetValue(address[:], []byte(domain), domainStorageMapValueID[8:]) + require.NoError(tb, err) + + vid := domainStorageMap.ValueID() + expectedDomainRootSlabIDs = append( + expectedDomainRootSlabIDs, + atree.NewSlabID(atree.Address(address), atree.SlabIndex(vid[8:]))) + + // Write elements to domain storage map + for len(accountValues[domain]) < domainStorageMapCount { + + key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int())) + + depth := random.Intn(maxDepth + 1) + value := randomCadenceValues(inter, address, depth, random) + + _ = domainStorageMap.WriteValue(inter, key, value) + + accountValues[domain][key] = value + } + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(tb, err) + + CheckAtreeStorageHealth(tb, storage, expectedDomainRootSlabIDs) + + // Create a new storage + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountsValues +} + +func newTestLedgerWithMigratedAccounts( + tb testing.TB, + onRead LedgerOnRead, + onWrite LedgerOnWrite, + accounts
[]accountInfo, +) (TestLedger, map[common.Address]accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // domain register to match DomainStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + tb, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + random := rand.New(rand.NewSource(42)) + + expectedRootSlabIDs := make([]atree.SlabID, 0, len(accounts)) + + accountsValues := make(map[common.Address]accountStorageMapValues) + + for _, account := range accounts { + + address := account.address + + accountValues := make(accountStorageMapValues) + + accountsValues[address] = accountValues + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + + // Write account register + accountStorageMapSlabIndex := accountStorageMap.SlabID().Index() + err := ledger.SetValue(address[:], []byte(runtime.AccountStorageKey), accountStorageMapSlabIndex[:]) + require.NoError(tb, err) + + expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) + + for _, domainInfo := range account.domains { + + domain := domainInfo.domain + domainStorageMapCount := domainInfo.domainStorageMapCount + maxDepth := domainInfo.maxDepth + + accountValues[domain] = make(domainStorageMapValues) + + // Create domain storage map + domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain) + + // Write elements to domain storage map + for len(accountValues[domain]) < domainStorageMapCount { + + key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int())) + + depth := random.Intn(maxDepth + 1) + value := randomCadenceValues(inter, address, depth, random) + + _ = domainStorageMap.WriteValue(inter, key, value) + + accountValues[domain][key] = value + } + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(tb, err) + + CheckAtreeStorageHealth(tb, storage, expectedRootSlabIDs) + + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountsValues +} + +func randomCadenceValues( + inter *interpreter.Interpreter, + address common.Address, + depth int, + random *rand.Rand, +) interpreter.EquatableValue { + var typeIndex int + if depth == 0 { + typeIndex = random.Intn(typeLargeString + 1) + } else { + typeIndex = random.Intn(maxType) + } + + switch typeIndex { + case typeUint8: + num := random.Intn(math.MaxUint8 + 1) + return interpreter.NewUnmeteredUInt8Value(uint8(num)) + + case typeUint16: + num := random.Intn(math.MaxUint16 + 1) + return interpreter.NewUnmeteredUInt16Value(uint16(num)) + + case typeUint32: + num := random.Uint32() + return interpreter.NewUnmeteredUInt32Value(num) + + case typeUint64: + num := random.Uint64() + return interpreter.NewUnmeteredUInt64Value(num) + + case typeSmallString: + const maxSmallStringLength = 32 + + size := random.Intn(maxSmallStringLength + 1) + + b := make([]byte, size) + random.Read(b) + s := strings.ToValidUTF8(string(b), "$") + return interpreter.NewUnmeteredStringValue(s) + + case typeLargeString: + const minLargeStringLength = 256 + const maxLargeStringLength = 1024
+ + size := random.Intn(maxLargeStringLength+1-minLargeStringLength) + minLargeStringLength + + b := make([]byte, size) + random.Read(b) + s := strings.ToValidUTF8(string(b), "$") + return interpreter.NewUnmeteredStringValue(s) + + case typeArray: + const minArrayLength = 1 + const maxArrayLength = 20 + + size := random.Intn(maxArrayLength+1-minArrayLength) + minArrayLength + + arrayType := interpreter.NewVariableSizedStaticType( + nil, + interpreter.PrimitiveStaticTypeAny, + ) + + depth-- + + values := make([]interpreter.Value, size) + for i := range size { + values[i] = randomCadenceValues(inter, common.ZeroAddress, depth, random) + } + + return interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + arrayType, + address, + values..., + ) + + case typeDictionary: + const minDictLength = 1 + const maxDictLength = 20 + + size := random.Intn(maxDictLength+1-minDictLength) + minDictLength + + dictType := interpreter.NewDictionaryStaticType( + nil, + interpreter.PrimitiveStaticTypeAny, + interpreter.PrimitiveStaticTypeAny, + ) + + depth-- + + keyAndValues := make([]interpreter.Value, 0, size*2) + for i := range size * 2 { + if i%2 == 0 { + // Key (0 depth for element) + keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, 0, random)) + } else { + // Value (decremented depth for element) + keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, depth, random)) + } + } + + return interpreter.NewDictionaryValueWithAddress(inter, interpreter.EmptyLocationRange, dictType, address, keyAndValues...) + + default: + panic(errors.NewUnreachableError()) + } +} + +const ( + typeUint8 = iota + typeUint16 + typeUint32 + typeUint64 + typeSmallString + typeLargeString + typeArray + typeDictionary + maxType +) diff --git a/runtime/runtime_memory_metering_test.go b/runtime/runtime_memory_metering_test.go index d6d389ff73..669b6152c1 100644 --- a/runtime/runtime_memory_metering_test.go +++ b/runtime/runtime_memory_metering_test.go @@ -871,7 +871,7 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) }) t.Run("storage used non empty", func(t *testing.T) { @@ -898,7 +898,7 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { OnGetStorageUsed: func(_ Address) (uint64, error) { // Before the storageUsed function is invoked, the deltas must have been committed. // So the encoded slabs must have been metered at this point. 
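+				// One more slab is encoded than before: the account storage map is now stored
+				// in addition to the domain storage maps.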
- assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) storageUsedInvoked = true return 1, nil }, @@ -918,7 +918,7 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { require.NoError(t, err) assert.True(t, storageUsedInvoked) - assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) }) } @@ -1073,7 +1073,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 75, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 114, int(meter.getMemory(common.MemoryKindBytes))) }) t.Run("string in loop", func(t *testing.T) { @@ -1122,7 +1122,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 61455, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 61501, int(meter.getMemory(common.MemoryKindBytes))) }) t.Run("composite", func(t *testing.T) { @@ -1173,6 +1173,6 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 58323, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 58369, int(meter.getMemory(common.MemoryKindBytes))) }) } diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index 7f942cbddf..ce4c2219b4 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -5780,21 +5780,28 @@ func TestRuntimeContractWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to contract domain storage map + // storage index to account storage map { addressValue[:], - []byte("contract"), + []byte(AccountStorageKey), }, // contract value + // NOTE: contract value is empty because it is inlined in contract domain storage map { addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, }, // contract domain storage map + // NOTE: contract domain storage map is empty because it is inlined in account storage map { addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, + // account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, }, writes, ) @@ -5829,8 +5836,10 @@ func TestRuntimeContractWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // Storage map is modified because contract value is inlined in contract storage map. - // NOTE: contract value slab doesn't exist. + // Account storage map is modified because: + // - contract value is inlined in contract storage map, and + // - contract storage map is inlined in account storage map. + // NOTE: both contract storage map slab and contract value slab don't exist. 
{ addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, @@ -5927,10 +5936,10 @@ func TestRuntimeStorageWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to contract domain storage map + // storage index to account storage map { addressValue[:], - []byte("contract"), + []byte(AccountStorageKey), }, // contract value // NOTE: contract value slab is empty because it is inlined in contract domain storage map @@ -5939,10 +5948,16 @@ func TestRuntimeStorageWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, }, // contract domain storage map + // NOTE: contract domain storage map is empty because it is inlined in account storage map { addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, + // account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, }, writes, ) @@ -5971,22 +5986,23 @@ func TestRuntimeStorageWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to storage domain storage map + // account storage map + // NOTE: account storage map is updated with new storage domain storage map (inlined). { addressValue[:], - []byte("storage"), + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, // resource value // NOTE: resource value slab is empty because it is inlined in storage domain storage map { addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, }, // storage domain storage map - // NOTE: resource value slab is inlined. + // NOTE: storage domain storage map is empty because it is inlined in account storage map. { addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5}, }, }, writes, @@ -6045,11 +6061,12 @@ func TestRuntimeStorageWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // Storage map is modified because resource value is inlined in storage map + // Account storage map is modified because resource value is inlined in storage map, + // and storage map is inlined in account storage map. // NOTE: resource value slab is empty. { addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, writes, @@ -7562,7 +7579,7 @@ func TestRuntimeComputationMetring(t *testing.T) { `, ok: true, hits: 3, - intensity: 76, + intensity: 115, }, } diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index 3008c85fff..f1d0ad703e 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -28,6 +28,7 @@ import ( "github.com/onflow/cadence/common" "github.com/onflow/cadence/interpreter" . "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/stdlib" . "github.com/onflow/cadence/test_utils/runtime_utils" ) @@ -211,14 +212,62 @@ func TestRuntimeSharedState(t *testing.T) { require.Equal(t, []ownerKeyPair{ + // Read account domain register to check if it is a migrated account + // Read returns no value. + { + owner: signerAddress[:], + key: []byte(AccountStorageKey), + }, + // Read contract domain register to check if it is a unmigrated account + // Read returns no value. { owner: signerAddress[:], key: []byte(StorageDomainContract), }, + // Read all available domain registers to check if it is a new account + // Read returns no value. 
+ { + owner: signerAddress[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, { owner: signerAddress[:], key: []byte(StorageDomainContract), }, + { + owner: signerAddress[:], + key: []byte(stdlib.InboxStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.CapabilityControllerStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.CapabilityControllerTagStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.PathCapabilityStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.AccountCapabilityStorageDomain), + }, + // Read account domain register + { + owner: signerAddress[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map { owner: signerAddress[:], key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, diff --git a/runtime/storage.go b/runtime/storage.go index 7b9a567285..d2559818ad 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -30,17 +30,40 @@ import ( "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/stdlib" ) -const StorageDomainContract = "contract" +const ( + StorageDomainContract = "contract" + AccountStorageKey = "stored" +) type Storage struct { *atree.PersistentSlabStorage - NewStorageMaps *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] - storageMaps map[interpreter.StorageKey]*interpreter.StorageMap + + // NewAccountStorageMapSlabIndices contains root slab index of new accounts' storage map. + // The indices are saved using Ledger.SetValue() during Commit(). + // Key is StorageKey{address, accountStorageKey} and value is 8-byte slab index. + NewAccountStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] + + // unmigratedAccounts are accounts that were accessed but not migrated. + unmigratedAccounts *orderedmap.OrderedMap[common.Address, struct{}] + + // cachedAccountStorageMaps is a cache of account storage maps. + // Key is StorageKey{address, accountStorageKey} and value is account storage map. + cachedAccountStorageMaps map[interpreter.StorageKey]*interpreter.AccountStorageMap + + // cachedDomainStorageMaps is a cache of domain storage maps. + // Key is StorageKey{address, domain} and value is domain storage map. + cachedDomainStorageMaps map[interpreter.StorageKey]*interpreter.DomainStorageMap + + // contractUpdates is a cache of contract updates. + // Key is StorageKey{contract_address, contract_name} and value is contract composite value. 
contractUpdates *orderedmap.OrderedMap[interpreter.StorageKey, *interpreter.CompositeValue] - Ledger atree.Ledger - memoryGauge common.MemoryGauge + + Ledger atree.Ledger + + memoryGauge common.MemoryGauge } var _ atree.SlabStorage = &Storage{} @@ -76,88 +99,319 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { decodeTypeInfo, ) return &Storage{ - Ledger: ledger, - PersistentSlabStorage: persistentSlabStorage, - storageMaps: map[interpreter.StorageKey]*interpreter.StorageMap{}, - memoryGauge: memoryGauge, + Ledger: ledger, + PersistentSlabStorage: persistentSlabStorage, + cachedAccountStorageMaps: map[interpreter.StorageKey]*interpreter.AccountStorageMap{}, + cachedDomainStorageMaps: map[interpreter.StorageKey]*interpreter.DomainStorageMap{}, + memoryGauge: memoryGauge, } } const storageIndexLength = 8 +// GetStorageMap returns existing or new domain storage map for the given account and domain. func (s *Storage) GetStorageMap( + inter *interpreter.Interpreter, address common.Address, domain string, createIfNotExists bool, ) ( - storageMap *interpreter.StorageMap, + storageMap *interpreter.DomainStorageMap, ) { - key := interpreter.NewStorageKey(s.memoryGauge, address, domain) - storageMap = s.storageMaps[key] - if storageMap == nil { + // Account can be migrated account, new account, or unmigrated account. + // + // + // ### Migrated Account + // + // Migrated account is account with AccountStorageKey register. + // Migrated account has account storage map, which contains domain storage maps + // with domain as key. + // + // If domain exists in the account storage map, domain storage map is returned. + // + // If domain doesn't exist and createIfNotExists is true, + // new domain storage map is created, inserted into account storage map, and returned. + // + // If domain doesn't exist and createIfNotExists is false, nil is returned. + // + // + // ### New Account + // + // New account is account without AccountStorageKey register and without any domain registers. + // NOTE: new account's AccountStorageKey register is persisted in Commit(). + // + // If createIfNotExists is true, + // - new account storage map is created + // - new domain storage map is created + // - domain storage map is inserted into account storage map + // - domain storage map is returned + // + // If createIfNotExists is false, nil is returned. + // + // + // ### Unmigrated Account + // + // Unmigrated account is account with at least one domain register. + // Unmigrated account has domain registers and corresponding domain storage maps. + // + // If domain exists (domain register exists), domain storage map is loaded and returned. + // + // If domain doesn't exist and createIfNotExists is true, + // new domain storage map is created and returned. + // NOTE: given account would be migrated in Commit() since this is write op. + // + // If domain doesn't exist and createIfNotExists is false, nil is returned. + // + // + // ### Migration of unmigrated accounts + // + // Migration happens in Commit() if unmigrated account has write ops. + // NOTE: Commit() is not called by this function. + // + // Specifically, + // - unmigrated account is migrated in Commit() if there are write ops. + // For example, inserting values in unmigrated account triggers migration in Commit(). + // - unmigrated account is unchanged if there are only read ops. + // For example, iterating values in unmigrated account doesn't trigger migration, + // and checking if domain exists doesn't trigger migration. 
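+
+	// A rough, illustrative sketch of the caller-visible behavior described
+	// above (not a definitive contract; it assumes a *Storage `storage`, an
+	// *interpreter.Interpreter `inter`, an address, and a domain identifier
+	// are in scope):
+	//
+	//	// New account, createIfNotExists = false: nothing is created.
+	//	m := storage.GetStorageMap(inter, address, domain, false)
+	//	// m == nil
+	//
+	//	// New account, createIfNotExists = true: an account storage map and an
+	//	// inlined domain storage map are created; the account's AccountStorageKey
+	//	// ("stored") register is written later, in Commit().
+	//	m = storage.GetStorageMap(inter, address, domain, true)
+	//	// m != nil, m.Count() == 0
+	//
+	//	// Unmigrated account: the domain storage map is loaded through the
+	//	// legacy domain register; a subsequent write marks the account so that
+	//	// Commit() migrates it to an account storage map.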
+ + // Get cached domain storage map if it exists. + + domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) + + if domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey]; domainStorageMap != nil { + return domainStorageMap + } - // Load data through the runtime interface + // Get (or create) domain storage map from existing account storage map + // if account is migrated account. - var data []byte - var err error - errors.WrapPanic(func() { - data, err = s.Ledger.GetValue(key.Address[:], []byte(key.Key)) - }) - if err != nil { - panic(interpreter.WrappedExternalError(err)) + accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) + + accountStorageMap, err := s.getAccountStorageMap(accountStorageKey) + if err != nil { + panic(err) + } + + if accountStorageMap != nil { + // This is migrated account. + + // Get (or create) domain storage map from account storage map. + domainStorageMap := accountStorageMap.GetDomain(s.memoryGauge, inter, domain, createIfNotExists) + + // Cache domain storage map + if domainStorageMap != nil { + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap } - dataLength := len(data) - isStorageIndex := dataLength == storageIndexLength - if dataLength > 0 && !isStorageIndex { - // TODO: add dedicated error type? + return domainStorageMap + } + + // At this point, account is either new or unmigrated account. + + domainStorageMap, err := getDomainStorageMapFromLegacyDomainRegister(s.Ledger, s.PersistentSlabStorage, address, domain) + if err != nil { + panic(err) + } + + if domainStorageMap != nil { + // This is a unmigrated account with given domain register. + + // Sanity check that domain is among expected domains. + if _, exist := accountDomainsSet[domain]; !exist { + // TODO: maybe also log unexpected domain. panic(errors.NewUnexpectedError( - "invalid storage index for storage map with domain '%s': expected length %d, got %d", - domain, storageIndexLength, dataLength, + "unexpected domain %s exists for account %s", domain, address.String(), )) } - // Load existing storage or create and store new one + // Cache domain storage map + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap - atreeAddress := atree.Address(address) + // Add account to unmigrated account list + s.addUnmigratedAccount(address) - if isStorageIndex { - var slabIndex atree.SlabIndex - copy(slabIndex[:], data[:]) - storageMap = s.loadExistingStorageMap(atreeAddress, slabIndex) - } else if createIfNotExists { - storageMap = s.StoreNewStorageMap(atreeAddress, domain) - } + return domainStorageMap + } - if storageMap != nil { - s.storageMaps[key] = storageMap - } + // At this point, account is either new account or unmigrated account without given domain. + + // Domain doesn't exist. Return early if createIfNotExists is false. 
+
+	if !createIfNotExists {
+		return nil
+	}
+
+	// Handle unmigrated account
+	unmigrated, err := s.isUnmigratedAccount(address)
+	if err != nil {
+		panic(err)
+	}
+	if unmigrated {
+		// Add account to unmigrated account list
+		s.addUnmigratedAccount(address)
+
+		// Create new domain storage map
+		domainStorageMap := interpreter.NewDomainStorageMap(s.memoryGauge, s, atree.Address(address))
+
+		// Cache new domain storage map
+		s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap
+
+		return domainStorageMap
+	}
+
+	// Handle new account
+
+	// Create account storage map
+	accountStorageMap = interpreter.NewAccountStorageMap(s.memoryGauge, s, atree.Address(address))
+
+	// Cache account storage map
+	s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap
+
+	// Create new domain storage map as an element in account storage map
+	domainStorageMap = accountStorageMap.NewDomain(s.memoryGauge, inter, domain)
+
+	// Cache domain storage map
+	s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap
+
+	// Save the new account and its account storage map root SlabID to the new account list
+	s.addNewAccount(accountStorageKey, accountStorageMap.SlabID().Index())
+
+	return domainStorageMap
+}
+
+// getAccountStorageMap returns the AccountStorageMap if it exists, or nil otherwise.
+func (s *Storage) getAccountStorageMap(accountStorageKey interpreter.StorageKey) (*interpreter.AccountStorageMap, error) {
+
+	// Return cached account storage map if available.
+
+	accountStorageMap := s.cachedAccountStorageMaps[accountStorageKey]
+	if accountStorageMap != nil {
+		return accountStorageMap, nil
+	}
+
+	// Load account storage map if account storage register exists.
+
+	accountStorageSlabIndex, accountStorageRegisterExists, err := getSlabIndexFromRegisterValue(
+		s.Ledger,
+		accountStorageKey.Address,
+		[]byte(accountStorageKey.Key),
+	)
+	if err != nil {
+		return nil, err
+	}
+	if !accountStorageRegisterExists {
+		return nil, nil
+	}
+
+	slabID := atree.NewSlabID(
+		atree.Address(accountStorageKey.Address),
+		accountStorageSlabIndex,
+	)
+
+	accountStorageMap = interpreter.NewAccountStorageMapWithRootID(s, slabID)
+
+	// Cache account storage map
+
+	s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap
+
+	return accountStorageMap, nil
+}
+
+// getDomainStorageMapFromLegacyDomainRegister returns the domain storage map from the legacy domain register.
+func getDomainStorageMapFromLegacyDomainRegister( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain string, +) (*interpreter.DomainStorageMap, error) { + domainStorageSlabIndex, domainRegisterExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(domain)) + if err != nil { + return nil, err + } + if !domainRegisterExists { + return nil, nil } - return storageMap + slabID := atree.NewSlabID(atree.Address(address), domainStorageSlabIndex) + return interpreter.NewDomainStorageMapWithRootID(storage, slabID), nil } -func (s *Storage) loadExistingStorageMap(address atree.Address, slabIndex atree.SlabIndex) *interpreter.StorageMap { +func (s *Storage) addUnmigratedAccount(address common.Address) { + if s.unmigratedAccounts == nil { + s.unmigratedAccounts = &orderedmap.OrderedMap[common.Address, struct{}]{} + } + if !s.unmigratedAccounts.Contains(address) { + s.unmigratedAccounts.Set(address, struct{}{}) + } +} - slabID := atree.NewSlabID(address, slabIndex) +func (s *Storage) addNewAccount(accountStorageKey interpreter.StorageKey, slabIndex atree.SlabIndex) { + if s.NewAccountStorageMapSlabIndices == nil { + s.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + } + s.NewAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) +} - return interpreter.NewStorageMapWithRootID(s, slabID) +// isUnmigratedAccount returns true if given account has any domain registers. +func (s *Storage) isUnmigratedAccount(address common.Address) (bool, error) { + if s.unmigratedAccounts != nil && + s.unmigratedAccounts.Contains(address) { + return true, nil + } + + // Check most frequently used domains first, such as storage, public, private. + for _, domain := range AccountDomains { + _, domainExists, err := getSlabIndexFromRegisterValue(s.Ledger, address, []byte(domain)) + if err != nil { + return false, err + } + if domainExists { + return true, nil + } + } + + return false, nil } -func (s *Storage) StoreNewStorageMap(address atree.Address, domain string) *interpreter.StorageMap { - storageMap := interpreter.NewStorageMap(s.memoryGauge, s, address) +// getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. +// This function returns error if +// - underlying ledger panics, or +// - underlying ledger returns error when retrieving ledger value, or +// - retrieved ledger value is invalid (for atree.SlabIndex). +func getSlabIndexFromRegisterValue( + ledger atree.Ledger, + address common.Address, + key []byte, +) (atree.SlabIndex, bool, error) { + var data []byte + var err error + errors.WrapPanic(func() { + data, err = ledger.GetValue(address[:], key) + }) + if err != nil { + return atree.SlabIndex{}, false, interpreter.WrappedExternalError(err) + } + + dataLength := len(data) - slabIndex := storageMap.SlabID().Index() + if dataLength == 0 { + return atree.SlabIndex{}, false, nil + } - storageKey := interpreter.NewStorageKey(s.memoryGauge, common.Address(address), domain) + isStorageIndex := dataLength == storageIndexLength + if !isStorageIndex { + // Invalid data in register - if s.NewStorageMaps == nil { - s.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + // TODO: add dedicated error type? 
+ return atree.SlabIndex{}, false, errors.NewUnexpectedError( + "invalid storage index for storage map of account '%x': expected length %d, got %d", + address[:], storageIndexLength, dataLength, + ) } - s.NewStorageMaps.Set(storageKey, slabIndex) - return storageMap + return atree.SlabIndex(data), true, nil } func (s *Storage) recordContractUpdate( @@ -216,7 +470,7 @@ func (s *Storage) writeContractUpdate( key interpreter.StorageKey, contractValue *interpreter.CompositeValue, ) { - storageMap := s.GetStorageMap(key.Address, StorageDomainContract, true) + storageMap := s.GetStorageMap(inter, key.Address, StorageDomainContract, true) // NOTE: pass nil instead of allocating a Value-typed interface that points to nil storageMapKey := interpreter.StringStorageMapKey(key.Key) if contractValue == nil { @@ -249,6 +503,12 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b return err } + // Migrate accounts that have write ops before calling PersistentSlabStorage.FastCommit(). + err = s.migrateAccountsIfNeeded(inter) + if err != nil { + return err + } + // Commit the underlying slab storage's writes size := s.PersistentSlabStorage.DeltasSizeWithoutTempAddresses() @@ -270,11 +530,11 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b } func (s *Storage) commitNewStorageMaps() error { - if s.NewStorageMaps == nil { + if s.NewAccountStorageMapSlabIndices == nil { return nil } - for pair := s.NewStorageMaps.Oldest(); pair != nil; pair = pair.Next() { + for pair := s.NewAccountStorageMapSlabIndices.Oldest(); pair != nil; pair = pair.Next() { var err error errors.WrapPanic(func() { err = s.Ledger.SetValue( @@ -291,6 +551,68 @@ func (s *Storage) commitNewStorageMaps() error { return nil } +func (s *Storage) migrateAccountsIfNeeded(inter *interpreter.Interpreter) error { + if s.unmigratedAccounts == nil || s.unmigratedAccounts.Len() == 0 { + return nil + } + return s.migrateAccounts(inter) +} + +func (s *Storage) migrateAccounts(inter *interpreter.Interpreter) error { + // Predicate function allows migration for accounts with write ops. + migrateAccountPred := func(address common.Address) bool { + return s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) + } + + // getDomainStorageMap function returns cached domain storage map if it is available + // before loading domain storage map from storage. + // This is necessary to migrate uncommitted (new) but cached domain storage map. + getDomainStorageMap := func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain string, + ) (*interpreter.DomainStorageMap, error) { + domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) + + // Get cached domain storage map if available. 
+ domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey] + + if domainStorageMap != nil { + return domainStorageMap, nil + } + + return getDomainStorageMapFromLegacyDomainRegister(ledger, storage, address, domain) + } + + migrator := NewDomainRegisterMigration(s.Ledger, s.PersistentSlabStorage, inter, s.memoryGauge) + migrator.SetGetDomainStorageMapFunc(getDomainStorageMap) + + migratedAccounts, err := migrator.MigrateAccounts(s.unmigratedAccounts, migrateAccountPred) + if err != nil { + return err + } + + if migratedAccounts == nil { + return nil + } + + // Update internal state with migrated accounts + for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { + address := pair.Key + accountStorageMap := pair.Value + + // Cache migrated account storage map + accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) + s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap + + // Remove migrated accounts from unmigratedAccounts + s.unmigratedAccounts.Delete(address) + } + + return nil +} + func (s *Storage) CheckHealth() error { // Check slab storage health rootSlabIDs, err := atree.CheckStorageHealth(s, -1) @@ -311,28 +633,51 @@ func (s *Storage) CheckHealth() error { accountRootSlabIDs[rootSlabID] = struct{}{} } - // Check that each storage map refers to an existing slab. - - found := map[atree.SlabID]struct{}{} + // Check that account storage maps and unmigrated domain storage maps + // match returned root slabs from atree.CheckStorageHealth. var storageMapStorageIDs []atree.SlabID - for _, storageMap := range s.storageMaps { //nolint:maprange + // Get cached account storage map slab IDs. + for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange storageMapStorageIDs = append( storageMapStorageIDs, storageMap.SlabID(), ) } + // Get cached unmigrated domain storage map slab IDs + for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange + address := storageKey.Address + + if s.unmigratedAccounts != nil && + s.unmigratedAccounts.Contains(address) { + + domainValueID := storageMap.ValueID() + + slabID := atree.NewSlabID( + atree.Address(address), + atree.SlabIndex(domainValueID[8:]), + ) + + storageMapStorageIDs = append( + storageMapStorageIDs, + slabID, + ) + } + } + sort.Slice(storageMapStorageIDs, func(i, j int) bool { a := storageMapStorageIDs[i] b := storageMapStorageIDs[j] return a.Compare(b) < 0 }) + found := map[atree.SlabID]struct{}{} + for _, storageMapStorageID := range storageMapStorageIDs { if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok { - return errors.NewUnexpectedError("account storage map points to non-existing slab %s", storageMapStorageID) + return errors.NewUnexpectedError("account storage map (and unmigrated domain storage map) points to non-root slab %s", storageMapStorageID) } found[storageMapStorageID] = struct{}{} @@ -381,3 +726,23 @@ func (UnreferencedRootSlabsError) IsInternalError() {} func (e UnreferencedRootSlabsError) Error() string { return fmt.Sprintf("slabs not referenced: %s", e.UnreferencedRootSlabIDs) } + +var AccountDomains = []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPrivate.Identifier(), + common.PathDomainPublic.Identifier(), + StorageDomainContract, + stdlib.InboxStorageDomain, + stdlib.CapabilityControllerStorageDomain, + stdlib.CapabilityControllerTagStorageDomain, + stdlib.PathCapabilityStorageDomain, + stdlib.AccountCapabilityStorageDomain, +} + +var accountDomainsSet = func() map[string]struct{} { + 
m := make(map[string]struct{}) + for _, domain := range AccountDomains { + m[domain] = struct{}{} + } + return m +}() diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 5a7bb60bc9..55090be2be 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -23,7 +23,10 @@ import ( "encoding/hex" "fmt" "math/rand" + "slices" "sort" + "strconv" + "strings" "testing" "github.com/onflow/atree" @@ -36,6 +39,7 @@ import ( "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/interpreter" . "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/stdlib" . "github.com/onflow/cadence/test_utils/common_utils" . "github.com/onflow/cadence/test_utils/interpreter_utils" . "github.com/onflow/cadence/test_utils/runtime_utils" @@ -67,10 +71,10 @@ func withWritesToStorage( var slabIndex atree.SlabIndex binary.BigEndian.PutUint32(slabIndex[:], randomIndex) - if storage.NewStorageMaps == nil { - storage.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + if storage.NewAccountStorageMapSlabIndices == nil { + storage.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} } - storage.NewStorageMaps.Set(storageKey, slabIndex) + storage.NewAccountStorageMapSlabIndices.Set(storageKey, slabIndex) } handler(storage, inter) @@ -197,16 +201,22 @@ func TestRuntimeStorageWrite(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to storage domain storage map + // storage index to account storage map { []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, - []byte("storage"), + []byte(AccountStorageKey), }, // storage domain storage map + // NOTE: storage domain storage map is empty because it is inlined in account storage map { []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, }, + // account storage map + { + []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, writes, ) @@ -1766,18 +1776,18 @@ func TestRuntimeResourceOwnerChange(t *testing.T) { assert.Equal(t, []string{ // account 0x1: - // NOTE: with atree inlining, contract is inlined in contract map - // storage map (domain key + map slab) - // + contract map (domain key + map slab) + // NOTE: with account storage map and atree inlining, + // both storage domain storage map (with inlined storage data) + // and contract domain storage map (with inlined contract data) + // are inlined in account storage map. "\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x02", - "\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x04", - "\x00\x00\x00\x00\x00\x00\x00\x01|contract", - "\x00\x00\x00\x00\x00\x00\x00\x01|storage", + "\x00\x00\x00\x00\x00\x00\x00\x01|stored", // account 0x2 - // NOTE: with atree inlining, resource is inlined in storage map - // storage map (domain key + map slab) + // NOTE: with account storage map and atree inlining, + // storage domain storage map (with inlined resource) + // is inlined in account storage map. 
"\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02", - "\x00\x00\x00\x00\x00\x00\x00\x02|storage", + "\x00\x00\x00\x00\x00\x00\x00\x02|stored", }, nonEmptyKeys, ) @@ -3100,7 +3110,7 @@ func TestRuntimeStorageInternalAccess(t *testing.T) { }) require.NoError(t, err) - storageMap := storage.GetStorageMap(address, common.PathDomainStorage.Identifier(), false) + storageMap := storage.GetStorageMap(inter, address, common.PathDomainStorage.Identifier(), false) require.NotNil(t, storageMap) // Read first @@ -6229,3 +6239,1788 @@ func TestRuntimeStorageReferenceAccess(t *testing.T) { require.ErrorAs(t, err, &interpreter.DereferenceError{}) }) } + +type ( + domainStorageMapValues map[interpreter.StorageMapKey]interpreter.Value + accountStorageMapValues map[string]domainStorageMapValues +) + +func TestRuntimeStorageForNewAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: empty storage + // post-condition: empty storage + // migration: no migration + t.Run("read non-existent domain storage map", func(t *testing.T) { + + var writeCount int + + // Create empty storage + ledger := NewTestLedger(nil, LedgerOnWriteCounter(&writeCount)) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + domain := common.PathDomainStorage.Identifier() + + // Get non-existent domain storage map + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.Nil(t, domainStorageMap) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Check number of writes to underlying storage + require.Equal(t, 0, writeCount) + }) + + // This test creates and writes to new domain storage map and commit changes. + // pre-condition: empty storage + // post-condition: storage containing + // - account register + // - account storage map + // - zero or more non-inlined domain storage map + // migration: no migraiton for new account. 
+ createDomainTestCases := []struct { + name string + newDomains []string + domainStorageMapCount int + inlined bool + }{ + {name: "empty domain storage map", newDomains: []string{common.PathDomainStorage.Identifier()}, domainStorageMapCount: 0, inlined: true}, + {name: "small domain storage map", newDomains: []string{common.PathDomainStorage.Identifier()}, domainStorageMapCount: 10, inlined: true}, + {name: "large domain storage map", newDomains: []string{common.PathDomainStorage.Identifier()}, domainStorageMapCount: 20, inlined: false}, + } + + for _, tc := range createDomainTestCases { + t.Run("create "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create empty storage + ledger := NewTestLedger(nil, LedgerOnWriteEntries(&writeEntries)) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + accountValues := make(accountStorageMapValues) + + // Create and write to domain storage map (createIfNotExists is true) + for _, domain := range tc.newDomains { + // Create new domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.domainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 2+len(tc.newDomains), len(writeEntries)) + + // writes[0]: account register + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[0].Key) + require.Equal(t, []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Value) + + // writes[1]: account storage map + require.Equal(t, address[:], writeEntries[1].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[1].Key) + require.True(t, len(writeEntries[1].Value) > 0) + + for i := range len(tc.newDomains) { + // writes[2+i]: domain storage map + + writeEntryIndex := 2 + i + owner := writeEntries[writeEntryIndex].Owner + key := writeEntries[writeEntryIndex].Key + value := writeEntries[writeEntryIndex].Value + + var slabKey [9]byte + slabKey[0] = '$' + binary.BigEndian.PutUint64(slabKey[1:], uint64(2+i)) + + require.Equal(t, address[:], owner) + require.Equal(t, slabKey[:], key) + + // Domain storage map value is empty if it is inlined in account storage map + if tc.inlined { + require.True(t, len(value) == 0) + } else { + require.True(t, len(value) > 0) + } + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test tests storage map operations with intermittent Commit(): + // - create domain storage map and commit + // - write to domain storage map and commit + // - remove all elements from domain storage map and commit + // - read domain storage map and commit + t.Run("create, commit, write, commit, remove, commit", func(t *testing.T) { + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + 
random := rand.New(rand.NewSource(42)) + + accountValues := make(accountStorageMapValues) + + domains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + } + + // Create empty domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + accountValues[domain] = make(domainStorageMapValues) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Write to existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to domain storage map + const domainStorageMapCount = 2 + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Remove all elements from existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + expectedDomainValues := accountValues[domain] + require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Remove elements from domain storage map + for k := range expectedDomainValues { + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(expectedDomainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +func TestRuntimeStorageForMigratedAccount(t *testing.T) { 
+ t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + // newTestLedgerWithMigratedAccount creates a new TestLedger containing + // account storage map with given domains for given address. + newTestLedgerWithMigratedAccount := func( + onRead LedgerOnRead, + onWrite LedgerOnWrite, + address common.Address, + domains []string, + domainStorageMapCount int, + ) (TestLedger, accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + accountValues := createAndWriteAccountStorageMap(t, storage, inter, address, domains, domainStorageMapCount, random) + + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountValues + } + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: no change + // migration: none + t.Run("read non-existent domain storage map", func(t *testing.T) { + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + } + + nonexistentDomain := common.PathDomainPublic.Identifier() + + var writeCount int + + // Create storage with account storage map + const domainStorageMapCount = 5 + ledger, _ := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Get non-existent domain storage map + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, nonexistentDomain, createIfNotExists) + require.Nil(t, domainStorageMap) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + + // This test reads existing domain storage map and commit changes. 
+ // pre-condition: storage contains account register and account storage map + // post-condition: no change + // migration: none + readExistingDomainTestCases := []struct { + name string + createIfNotExists bool + }{ + {name: "(createIfNotExists is true)", createIfNotExists: true}, + {name: "(createIfNotExists is false)", createIfNotExists: false}, + } + + for _, tc := range readExistingDomainTestCases { + t.Run("read existing domain storage map "+tc.name, func(t *testing.T) { + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + var writeCount int + + // Create storage with account storage map + const domainStorageMapCount = 5 + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Read existing domain storage map + for domain, domainValues := range accountValues { + domainStorageMap := storage.GetStorageMap(inter, address, domain, tc.createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedV := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + } + + // This test creates and writes to new domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: storage contains + // - account register + // - account storage map with new domain storage map. 
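+
+	// As a reading aid: a migrated account is recognized solely by its
+	// AccountStorageKey ("stored") register, which holds the 8-byte root slab
+	// index of the account storage map. A sketch (error handling elided),
+	// mirroring getSlabIndexFromRegisterValue and getAccountStorageMap in
+	// storage.go:
+	//
+	//	data, _ := ledger.GetValue(address[:], []byte(AccountStorageKey))
+	//	if len(data) == 8 {
+	//		slabID := atree.NewSlabID(atree.Address(address), atree.SlabIndex(data))
+	//		accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, slabID)
+	//		_ = accountStorageMap
+	//	}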
+ createDomainTestCases := []struct { + name string + existingDomains []string + newDomains []string + existingDomainStorageMapCount int + newDomainStorageMapCount int + isNewDomainStorageMapInlined bool + }{ + { + name: "empty domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 0, + isNewDomainStorageMapInlined: true, + }, + { + name: "small domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 10, + isNewDomainStorageMapInlined: true, + }, + { + name: "large domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 20, + isNewDomainStorageMapInlined: false, + }, + } + + for _, tc := range createDomainTestCases { + t.Run("create and write "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + tc.existingDomains, + tc.existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + lastIndex := ledger.StorageIndices[string(address[:])] + + random := rand.New(rand.NewSource(42)) + + // Create and write to domain storage map (createIfNotExists is true) + for _, domain := range tc.newDomains { + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write elements to to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1+len(tc.newDomains), len(writeEntries)) + + // writes[0]: account storage map + // account storage map is updated to include new domains. 
+ require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + for i := range len(tc.newDomains) { + // writes[1+i]: domain storage map + // domain storage map value is empty if it is inlined in account storage map + + writeEntryIndex := 1 + i + owner := writeEntries[writeEntryIndex].Owner + key := writeEntries[writeEntryIndex].Key + value := writeEntries[writeEntryIndex].Value + + var slabKey [9]byte + slabKey[0] = '$' + binary.BigEndian.PutUint64(slabKey[1:], lastIndex+1+uint64(i)) + + require.Equal(t, address[:], owner) + require.Equal(t, slabKey[:], key) + + if tc.isNewDomainStorageMapInlined { + require.True(t, len(value) == 0) + } else { + require.True(t, len(value) > 0) + } + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test reads and writes to existing domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: storage contains + // - account register + // - account storage map with updated domain storage map. + t.Run("read and write to existing domain storage map", func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + existingDomains := []string{common.PathDomainStorage.Identifier()} + const existingDomainStorageMapCount = 5 + + // Create storage with account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + existingDomains, + existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Write to existing domain storage map (createIfNotExists is false) + for _, domain := range existingDomains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Update or remove existing elements + for i, k := range domainKeys { + if i%2 == 0 { + n := random.Int() + newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, newValue) + require.True(t, existed) + + domainValues[k] = newValue + } else { + // Remove existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Write new elements + const newElementCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random) + + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1, len(writeEntries)) + + // writes[0]: account storage map + // account storage map is updated because inlined domain storage map is updated. 
+ require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + + // This test tests storage map operations with intermittent Commit(): + // - read domain storage map and commit + // - write to domain storage map and commit + // - remove all elements from domain storage map and commit + // - read domain storage map and commit + t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) { + + domains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + } + const domainStorageMapCount = 5 + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + nil, + address, + domains, + domainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedValue := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev := v.(interpreter.EquatableValue) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Write to existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + // Write to domain storage map + const domainStorageMapCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random) + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Remove all elements from existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + expectedDomainValues := accountValues[domain] + require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Remove elements from domain storage map + for k := range 
expectedDomainValues { + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(expectedDomainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + newTestLedgerWithUnmigratedAccount := func( + onRead LedgerOnRead, + onWrite LedgerOnWrite, + address common.Address, + domains []string, + domainStorageMapCount int, + ) (TestLedger, accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreter(t) + + accountValues := make(accountStorageMapValues) + + random := rand.New(rand.NewSource(42)) + + for _, domain := range domains { + accountValues[domain] = make(domainStorageMapValues) + + // Create domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + + // Write domain register + domainStorageMapValueID := domainStorageMap.ValueID() + err := ledger.SetValue(address[:], []byte(domain), domainStorageMapValueID[8:]) + require.NoError(t, err) + + // Write elements to to domain storage map + for len(accountValues[domain]) < domainStorageMapCount { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + _ = domainStorageMap.WriteValue(inter, key, value) + + accountValues[domain][key] = value + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Create a new storage + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountValues + } + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: storage contains domain register and domain storage map + // post-condition: no change + // migration: none because only read ops. 
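+
+	// By contrast, a sketch of what is NOT exercised here: a write through the
+	// same API would leave unsaved changes, so the next Commit() would migrate
+	// the account, removing its legacy domain registers and writing the
+	// AccountStorageKey ("stored") register instead:
+	//
+	//	m := storage.GetStorageMap(inter, address, domain, false)
+	//	m.WriteValue(inter, interpreter.StringStorageMapKey("answer"),
+	//		interpreter.NewUnmeteredIntValueFromInt64(42))
+	//	_ = storage.Commit(inter, false)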
+	t.Run("read non-existent domain storage map", func(t *testing.T) {
+		existingDomains := []string{
+			common.PathDomainStorage.Identifier(),
+		}
+
+		var writeCount int
+
+		// Create storage with an unmigrated account
+		const domainStorageMapCount = 5
+		ledger, _ := newTestLedgerWithUnmigratedAccount(
+			nil,
+			LedgerOnWriteCounter(&writeCount),
+			address,
+			existingDomains,
+			domainStorageMapCount)
+		storage := NewStorage(ledger, nil)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		// Get non-existent domain storage map
+		const createIfNotExists = false
+		nonexistingDomain := common.PathDomainPublic.Identifier()
+		domainStorageMap := storage.GetStorageMap(inter, address, nonexistingDomain, createIfNotExists)
+		require.Nil(t, domainStorageMap)
+
+		// Commit changes
+		const commitContractUpdates = false
+		err := storage.Commit(inter, commitContractUpdates)
+		require.NoError(t, err)
+
+		// Check there are no writes to underlying storage
+		require.Equal(t, 0, writeCount)
+	})
+
+	// This test reads existing domain storage maps and commits changes.
+	// pre-condition: storage contains domain register and domain storage map
+	// post-condition: no change
+	// migration: none because only read ops
+	readExistingDomainTestCases := []struct {
+		name              string
+		createIfNotExists bool
+	}{
+		{name: "(createIfNotExists is true)", createIfNotExists: true},
+		{name: "(createIfNotExists is false)", createIfNotExists: false},
+	}
+
+	for _, tc := range readExistingDomainTestCases {
+		t.Run("read existing domain storage map "+tc.name, func(t *testing.T) {
+
+			var writeCount int
+
+			existingDomains := []string{common.PathDomainStorage.Identifier()}
+			const existingDomainStorageMapCount = 5
+
+			// Create storage with existing domain storage map
+			ledger, accountValues := newTestLedgerWithUnmigratedAccount(
+				nil,
+				LedgerOnWriteCounter(&writeCount),
+				address,
+				existingDomains,
+				existingDomainStorageMapCount,
+			)
+			storage := NewStorage(ledger, nil)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			// Read existing domain storage map
+			for domain, domainValues := range accountValues {
+				domainStorageMap := storage.GetStorageMap(inter, address, domain, tc.createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				// Read elements from domain storage map
+				for k, expectedV := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev, ok := v.(interpreter.EquatableValue)
+					require.True(t, ok)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 0, writeCount)
+		})
+	}
+
+	// This test creates and writes to a new domain storage map and commits changes.
+	// pre-condition: storage contains
+	//   - domain register
+	//   - domain storage map
+	// post-condition: storage contains
+	//   - account register
+	//   - account storage map with existing and new domain storage maps.
+ // migration: yes + createDomainTestCases := []struct { + name string + existingDomains []string + newDomains []string + existingDomainStorageMapCount int + newDomainStorageMapCount int + isNewDomainStorageMapInlined bool + }{ + { + name: "empty domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 0, + isNewDomainStorageMapInlined: true, + }, + { + name: "small domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 10, + isNewDomainStorageMapInlined: true, + }, + { + name: "large domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 20, + isNewDomainStorageMapInlined: false, + }, + } + + for _, tc := range createDomainTestCases { + t.Run("create and write "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + tc.existingDomains, + tc.existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Create and write to new domain storage map + for _, domain := range tc.newDomains { + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write elements to to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + // writes include: + // - empty registers for all existing and new domains + // - 1 account register + // - 1 account storage map register + // - other non-inlined domain storage map + require.True(t, len(writeEntries) > 1+len(tc.existingDomains)+len(tc.newDomains)) + + i := 0 + for _, domain := range AccountDomains { + + if slices.Contains(tc.existingDomains, domain) || + slices.Contains(tc.newDomains, domain) { + + // Existing and new domain registers are removed. + // Removing new (non-existent) domain registers is no-op. 
+ require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, []byte(domain), writeEntries[i].Key) + require.True(t, len(writeEntries[i].Value) == 0) + + i++ + } + } + + // Account register is created + require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[i].Key) + require.True(t, len(writeEntries[i].Value) > 0) + + i++ + + // Remaining writes are atree slabs (either empty for migrated domain storage map or non-empty for account storage map) + for ; i < len(writeEntries); i++ { + require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, byte('$'), writeEntries[i].Key[0]) + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test reads and writes to existing domain storage map and commit changes. + // pre-condition: storage contains + // - domain register + // - domain storage map + // post-condition: storage contains + // - account register + // - account storage map with updated domain storage map. + // migration: yes + t.Run("read and write to existing domain storage map", func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + domains := []string{common.PathDomainStorage.Identifier()} + const existingDomainStorageMapCount = 5 + + // Create storage with existing domain storage maps + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + domains, + existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // write to existing domain storage map (createIfNotExists is false) + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Update or remove elements + for i, k := range domainKeys { + if i%2 == 0 { + n := random.Int() + newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, newValue) + require.True(t, existed) + + domainValues[k] = newValue + } else { + // Remove existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Write new elements + const newElementCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random) + + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 4, len(writeEntries)) + + // writes[0]: domain register + // storage domain register is removed + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) == 0) + + // writes[1]: 
account register
+		// account register is created
+		require.Equal(t, address[:], writeEntries[1].Owner)
+		require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key)
+		require.True(t, len(writeEntries[1].Value) > 0)
+
+		// writes[2]: storage domain storage map
+		// storage domain storage map is removed because it is inlined in account storage map.
+		require.Equal(t, address[:], writeEntries[2].Owner)
+		require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key)
+		require.True(t, len(writeEntries[2].Value) == 0)
+
+		// writes[3]: account storage map
+		// account storage map is created with inlined domain storage map.
+		require.Equal(t, address[:], writeEntries[3].Owner)
+		require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key)
+		require.True(t, len(writeEntries[3].Value) > 0)
+
+		// Verify account storage map data
+		checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+	})
+
+	// This test tests storage map operations (including account migration) with intermittent Commit():
+	// - read domain storage map and commit
+	// - write to domain storage map and commit (including account migration)
+	// - remove all elements from domain storage map and commit
+	// - read domain storage map and commit
+	t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) {
+
+		var writeEntries []OwnerKeyValue
+
+		domains := []string{common.PathDomainStorage.Identifier()}
+		const domainStorageMapCount = 5
+
+		// Create storage with existing (unmigrated) domain storage maps
+		ledger, accountValues := newTestLedgerWithUnmigratedAccount(
+			nil,
+			LedgerOnWriteEntries(&writeEntries),
+			address,
+			domains,
+			domainStorageMapCount,
+		)
+		storage := NewStorage(ledger, nil)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		// Read domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				for k, expectedValue := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev := v.(interpreter.EquatableValue)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+		}
+
+		// Update domain storage map, and commit changes (account is migrated during commit)
+		{
+			// Update existing domain storage map (loaded from storage)
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+				for k := range domainValues { //nolint:maprange
+					domainKeys = append(domainKeys, k)
+				}
+
+				// Update elements
+				for _, k := range domainKeys {
+					n := random.Int()
+					newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+					// Update existing element
+					existed := domainStorageMap.WriteValue(inter, k, newValue)
+					require.True(t, existed)
+
+					domainValues[k] = newValue
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 4, len(writeEntries))
+
+			// writes[0]: storage domain register
+			// Storage domain register is removed
+			require.Equal(t, address[:], writeEntries[0].Owner)
+			require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key)
+			require.True(t, len(writeEntries[0].Value) == 0)
+
+			// writes[1]: account register
+			// Account register is created
+			require.Equal(t, address[:], writeEntries[1].Owner)
+			require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key)
+			require.True(t, len(writeEntries[1].Value) > 0)
+
+			// writes[2]: storage domain storage map
+			// storage domain storage map is removed because it is inlined in account storage map.
+			require.Equal(t, address[:], writeEntries[2].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key)
+			require.True(t, len(writeEntries[2].Value) == 0)
+
+			// writes[3]: account storage map
+			// account storage map is created with inlined domain storage map.
+			require.Equal(t, address[:], writeEntries[3].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key)
+			require.True(t, len(writeEntries[3].Value) > 0)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+
+			writeEntries = nil
+		}
+
+		// Remove all elements from domain storage map, and commit changes
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+				for k := range domainValues { //nolint:maprange
+					domainKeys = append(domainKeys, k)
+				}
+
+				// Remove elements
+				for _, k := range domainKeys {
+
+					// Remove existing element
+					existed := domainStorageMap.WriteValue(inter, k, nil)
+					require.True(t, existed)
+
+					delete(domainValues, k)
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 1, len(writeEntries))
+
+			// writes[0]: account storage map
+			// account storage map is modified because inlined domain storage map is modified.
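+			// No domain register writes are expected here, since the account
+			// was already migrated by the previous commit.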
+			require.Equal(t, address[:], writeEntries[0].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[0].Key)
+			require.True(t, len(writeEntries[0].Value) > 0)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Read domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				for k, expectedValue := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev := v.(interpreter.EquatableValue)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+	})
+}
+
+// TestRuntimeStorageDomainStorageMapInlinedState tests the inlined state
+// of a domain storage map when a large number of elements are inserted,
+// updated, and removed from it.
+// Initially the domain storage map is inlined in the account storage map;
+// it becomes un-inlined when a large number of elements are inserted, and
+// is inlined again when all elements are removed.
+func TestRuntimeStorageDomainStorageMapInlinedState(t *testing.T) {
+	random := rand.New(rand.NewSource(42))
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	// Create empty storage
+	ledger := NewTestLedger(nil, nil)
+	storage := NewStorage(ledger, nil)
+
+	inter := NewTestInterpreterWithStorage(t, storage)
+
+	domains := []string{
+		common.PathDomainStorage.Identifier(),
+		common.PathDomainPublic.Identifier(),
+		common.PathDomainPrivate.Identifier(),
+	}
+
+	const domainStorageMapCount = 500
+
+	accountValues := make(accountStorageMapValues)
+
+	for _, domain := range domains {
+
+		// Create domain storage map
+		const createIfNotExists = true
+		domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+		require.NotNil(t, domainStorageMap)
+		require.True(t, domainStorageMap.Inlined())
+
+		valueID := domainStorageMap.ValueID()
+
+		accountValues[domain] = make(domainStorageMapValues)
+
+		domainValues := accountValues[domain]
+
+		// Insert new values into domain storage map
+		for domainStorageMap.Count() < domainStorageMapCount {
+			n := random.Int()
+			key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+			if _, exists := domainValues[key]; exists {
+				continue
+			}
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+			existed := domainStorageMap.WriteValue(inter, key, value)
+			require.False(t, existed)
+
+			domainValues[key] = value
+		}
+
+		require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count())
+		require.Equal(t, valueID, domainStorageMap.ValueID())
+		require.False(t, domainStorageMap.Inlined())
+
+		// Check storage health
+		err := storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Overwrite values in domain storage map
+		for key := range domainValues {
+			n := random.Int()
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+			existed :=
domainStorageMap.WriteValue(inter, key, value) + require.True(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.False(t, domainStorageMap.Inlined()) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Remove all values in domain storage map + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + + delete(domainValues, key) + } + + require.Equal(t, uint64(0), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // There should be 2 non-empty registers in ledger after commits: + // - account register (key is "stored") + // - account storage map (atree slab) + nonEmptyRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) > 0 { + nonEmptyRegisters[k] = v + } + } + require.Equal(t, 2, len(nonEmptyRegisters)) + + accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey] + require.True(t, accountRegisterExists) + require.Equal(t, 8, len(accountRegisterValue)) + + _, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)] + require.True(t, accountStorageMapRegisterExists) + + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) +} + +// TestRuntimeStorageLargeDomainValues tests large values +// in domain storage map. 
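+// Each value is a ~1 kB string; with only a few elements per domain,
+// the domain storage maps are expected to stay inlined in the account
+// storage map through inserts, overwrites, and removals.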
+func TestRuntimeStorageLargeDomainValues(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + address := common.MustBytesToAddress([]byte{0x1}) + + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + domains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + common.PathDomainPrivate.Identifier(), + } + + const domainStorageMapCount = 5 + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + + // Create domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.True(t, domainStorageMap.Inlined()) + + valueID := domainStorageMap.ValueID() + + accountValues[domain] = make(domainStorageMapValues) + + domainValues := accountValues[domain] + + // Insert new values to domain storage map + for domainStorageMap.Count() < domainStorageMapCount { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, exists := domainValues[key]; exists { + continue + } + value := interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.False(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + + // Check storage health + err := storage.CheckHealth() + require.NoError(t, err) + + // Overwrite values in domain storage map + for key := range domainValues { + value := interpreter.NewUnmeteredStringValue(strings.Repeat("b", 1_000)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.True(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Remove all values in domain storage map + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + + delete(domainValues, key) + } + + require.Equal(t, uint64(0), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // There should be 2 non-empty registers in ledger after commits: + // - account register (key is "stored") + // - account storage map (atree slab) + nonEmptyRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) > 0 { + nonEmptyRegisters[k] = v + } + } + require.Equal(t, 2, len(nonEmptyRegisters)) + + accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey] + require.True(t, accountRegisterExists) + require.Equal(t, 8, len(accountRegisterValue)) + + _, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)] + require.True(t, accountStorageMapRegisterExists) + + checkAccountStorageMapData(t, 
ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+}
+
+func TestDomainRegisterMigrationForLargeAccount(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	var writeCount int
+
+	accountsInfo := []accountInfo{
+		{
+			address: address,
+			domains: []domainInfo{
+				{domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 100, maxDepth: 3},
+				{domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 100, maxDepth: 3},
+				{domain: common.PathDomainPrivate.Identifier(), domainStorageMapCount: 100, maxDepth: 3},
+			},
+		},
+	}
+
+	ledger, accountsValues := newTestLedgerWithUnmigratedAccounts(
+		t,
+		nil,
+		LedgerOnWriteCounter(&writeCount),
+		accountsInfo,
+	)
+	storage := NewStorage(ledger, nil)
+
+	inter := NewTestInterpreterWithStorage(t, storage)
+
+	accountValues := accountsValues[address]
+
+	// Create new domain storage map
+	const createIfNotExists = true
+	domain := stdlib.InboxStorageDomain
+	domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+	require.NotNil(t, domainStorageMap)
+
+	accountValues[domain] = make(domainStorageMapValues)
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(t, err)
+
+	// Check there are writes to underlying storage
+	require.True(t, writeCount > 0)
+
+	// Check there aren't any domain registers
+	nonAtreeRegisters := make(map[string][]byte)
+	for k, v := range ledger.StoredValues {
+		if len(v) == 0 {
+			continue
+		}
+		ks := strings.Split(k, "|")
+		if ks[1][0] != '$' {
+			nonAtreeRegisters[k] = v
+		}
+	}
+
+	require.Equal(t, 1, len(nonAtreeRegisters))
+	for k := range nonAtreeRegisters {
+		ks := strings.Split(k, "|")
+		require.Equal(t, address[:], []byte(ks[0]))
+		require.Equal(t, AccountStorageKey, ks[1])
+	}
+
+	// Check storage health after commit
+	err = storage.CheckHealth()
+	require.NoError(t, err)
+
+	// Verify account storage map data
+	checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+}
+
+// createAndWriteAccountStorageMap creates an account storage map with the given domains and writes random values to each domain storage map.
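+// It commits the changes, checks storage health, and returns the written values.
+//
+// Typical usage (sketch):
+//
+//	accountValues := createAndWriteAccountStorageMap(t, storage, inter, address, domains, 10, random)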
+func createAndWriteAccountStorageMap(
+	t testing.TB,
+	storage *Storage,
+	inter *interpreter.Interpreter,
+	address common.Address,
+	domains []string,
+	count int,
+	random *rand.Rand,
+) accountStorageMapValues {
+
+	accountValues := make(accountStorageMapValues)
+
+	// Create domain storage map
+	for _, domain := range domains {
+		const createIfNotExists = true
+		domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		// Write to domain storage map
+		accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, count, random)
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(t, err)
+
+	// Check storage health after commit
+	err = storage.CheckHealth()
+	require.NoError(t, err)
+
+	return accountValues
+}
+
+func writeToDomainStorageMap(
+	inter *interpreter.Interpreter,
+	domainStorageMap *interpreter.DomainStorageMap,
+	count int,
+	random *rand.Rand,
+) domainStorageMapValues {
+	domainValues := make(domainStorageMapValues)
+
+	for len(domainValues) < count {
+		n := random.Int()
+
+		key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+
+		value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+		domainStorageMap.WriteValue(inter, key, value)
+
+		domainValues[key] = value
+	}
+
+	return domainValues
+}
+
+// checkAccountStorageMapData creates a new storage from the given storedValues and storageIndices,
+// and compares the account storage map contents with the given expectedAccountValues.
+func checkAccountStorageMapData(
+	tb testing.TB,
+	storedValues map[string][]byte,
+	storageIndices map[string]uint64,
+	address common.Address,
+	expectedAccountValues accountStorageMapValues,
+) {
+	// Create storage with given storedValues and storageIndices
+	ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices)
+	storage := NewStorage(ledger, nil)
+
+	inter := NewTestInterpreterWithStorage(tb, storage)
+
+	// Get account register
+	accountStorageMapSlabIndex, err := ledger.GetValue(address[:], []byte(AccountStorageKey))
+	require.NoError(tb, err)
+	require.Equal(tb, 8, len(accountStorageMapSlabIndex))
+
+	// Load account storage map
+	accountSlabID := atree.NewSlabID(
+		atree.Address(address[:]),
+		atree.SlabIndex(accountStorageMapSlabIndex[:]),
+	)
+	accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountSlabID)
+	require.NotNil(tb, accountStorageMap)
+	require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count())
+
+	domainCount := 0
+	iter := accountStorageMap.Iterator()
+	for {
+		domain, domainStorageMap := iter.Next()
+		if domain == "" {
+			break
+		}
+
+		domainCount++
+
+		expectedDomainValues, exist := expectedAccountValues[domain]
+		require.True(tb, exist)
+		require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count())
+
+		// Check values stored in domain storage map
+		for key, expectedValue := range expectedDomainValues {
+			value := domainStorageMap.ReadValue(nil, key)
+
+			ev, ok := value.(interpreter.EquatableValue)
+			require.True(tb, ok)
+			require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+		}
+	}
+
+	require.Equal(tb, len(expectedAccountValues), domainCount)
+
+	// Check atree storage health
+	rootSlabIDs, err := atree.CheckStorageHealth(storage.PersistentSlabStorage, 1)
+	require.NoError(tb, err)
+	require.Equal(tb, 1, len(rootSlabIDs))
+	require.Contains(tb, rootSlabIDs,
accountSlabID) +} diff --git a/stdlib/account.go b/stdlib/account.go index 97a8e4d0f3..4ce0f6d65c 100644 --- a/stdlib/account.go +++ b/stdlib/account.go @@ -3253,6 +3253,7 @@ func recordStorageCapabilityController( storageMapKey := interpreter.StringStorageMapKey(identifier) storageMap := inter.Storage().GetStorageMap( + inter, address, PathCapabilityStorageDomain, true, @@ -3295,6 +3296,7 @@ func getPathCapabilityIDSet( storageMapKey := interpreter.StringStorageMapKey(identifier) storageMap := inter.Storage().GetStorageMap( + inter, address, PathCapabilityStorageDomain, false, @@ -3345,6 +3347,7 @@ func unrecordStorageCapabilityController( if capabilityIDSet.Count() == 0 { storageMap := inter.Storage().GetStorageMap( + inter, address, PathCapabilityStorageDomain, true, @@ -3417,6 +3420,7 @@ func recordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) storageMap := inter.Storage().GetStorageMap( + inter, address, AccountCapabilityStorageDomain, true, @@ -3444,6 +3448,7 @@ func unrecordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) storageMap := inter.Storage().GetStorageMap( + inter, address, AccountCapabilityStorageDomain, true, @@ -3463,6 +3468,7 @@ func getAccountCapabilityControllerIDsIterator( count uint64, ) { storageMap := inter.Storage().GetStorageMap( + inter, address, AccountCapabilityStorageDomain, false, diff --git a/test_utils/interpreter_utils/interpreter.go b/test_utils/interpreter_utils/interpreter.go index 46a8182023..e92a5264dc 100644 --- a/test_utils/interpreter_utils/interpreter.go +++ b/test_utils/interpreter_utils/interpreter.go @@ -29,14 +29,26 @@ import ( func NewTestInterpreter(tb testing.TB) *interpreter.Interpreter { storage := NewUnmeteredInMemoryStorage() + return NewTestInterpreterWithStorage(tb, storage) +} + +func NewTestInterpreterWithStorage(tb testing.TB, storage interpreter.Storage) *interpreter.Interpreter { + return NewTestInterpreterWithStorageAndAtreeValidationConfig(tb, storage, true, true) +} +func NewTestInterpreterWithStorageAndAtreeValidationConfig( + tb testing.TB, + storage interpreter.Storage, + atreeValueValidationEnabled bool, + atreeStorageValidationEnabled bool, +) *interpreter.Interpreter { inter, err := interpreter.NewInterpreter( nil, TestLocation, &interpreter.Config{ Storage: storage, - AtreeValueValidationEnabled: true, - AtreeStorageValidationEnabled: true, + AtreeValueValidationEnabled: atreeValueValidationEnabled, + AtreeStorageValidationEnabled: atreeStorageValidationEnabled, }, ) require.NoError(tb, err) diff --git a/test_utils/runtime_utils/storage.go b/test_utils/runtime_utils/storage.go new file mode 100644 index 0000000000..0c971d8c5d --- /dev/null +++ b/test_utils/runtime_utils/storage.go @@ -0,0 +1,42 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package runtime_utils + +import ( + "testing" + + "github.com/onflow/atree" + "github.com/stretchr/testify/require" +) + +func CheckAtreeStorageHealth(tb testing.TB, storage atree.SlabStorage, expectedRootSlabIDs []atree.SlabID) { + rootSlabIDs, err := atree.CheckStorageHealth(storage, -1) + require.NoError(tb, err) + + nonTempRootSlabIDs := make([]atree.SlabID, 0, len(rootSlabIDs)) + + for rootSlabID := range rootSlabIDs { //nolint:maprange + if rootSlabID.HasTempAddress() { + continue + } + nonTempRootSlabIDs = append(nonTempRootSlabIDs, rootSlabID) + } + + require.ElementsMatch(tb, nonTempRootSlabIDs, expectedRootSlabIDs) +} diff --git a/test_utils/runtime_utils/testledger.go b/test_utils/runtime_utils/testledger.go index 4d4c846172..ef77d134f8 100644 --- a/test_utils/runtime_utils/testledger.go +++ b/test_utils/runtime_utils/testledger.go @@ -31,6 +31,7 @@ import ( type TestLedger struct { StoredValues map[string][]byte + StorageIndices map[string]uint64 OnValueExists func(owner, key []byte) (exists bool, err error) OnGetValue func(owner, key []byte) (value []byte, err error) OnSetValue func(owner, key, value []byte) (err error) @@ -92,9 +93,30 @@ func (s TestLedger) Dump() { } } +type LedgerOnRead func(owner, key, value []byte) +type LedgerOnWrite func(owner, key, value []byte) + +type OwnerKeyValue struct { + Owner, Key, Value []byte +} + +var LedgerOnWriteCounter = func(counter *int) LedgerOnWrite { + return func(_, _, _ []byte) { + (*counter)++ + } +} + +var LedgerOnWriteEntries = func(entries *[]OwnerKeyValue) LedgerOnWrite { + return func(owner, key, value []byte) { + *entries = append( + *entries, + OwnerKeyValue{Owner: owner, Key: key, Value: value}) + } +} + func NewTestLedger( - onRead func(owner, key, value []byte), - onWrite func(owner, key, value []byte), + onRead LedgerOnRead, + onWrite LedgerOnWrite, ) TestLedger { storedValues := map[string][]byte{} @@ -102,7 +124,8 @@ func NewTestLedger( storageIndices := map[string]uint64{} return TestLedger{ - StoredValues: storedValues, + StoredValues: storedValues, + StorageIndices: storageIndices, OnValueExists: func(owner, key []byte) (bool, error) { value := storedValues[TestStorageKey(string(owner), string(key))] return len(value) > 0, nil @@ -142,7 +165,8 @@ func NewTestLedgerWithData( } return TestLedger{ - StoredValues: storedValues, + StoredValues: storedValues, + StorageIndices: storageIndices, OnValueExists: func(owner, key []byte) (bool, error) { value := storedValues[storageKey(string(owner), string(key))] return len(value) > 0, nil diff --git a/tools/compatibility-check/go.mod b/tools/compatibility-check/go.mod index 628bfdc184..0a4813d95b 100644 --- a/tools/compatibility-check/go.mod +++ b/tools/compatibility-check/go.mod @@ -43,7 +43,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onflow/atree v0.8.0 // indirect + github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf // indirect github.com/onflow/crypto v0.25.2 // indirect github.com/onflow/flow-core-contracts/lib/go/templates v1.3.3-0.20241017220455-79fdc6c8ba53 // indirect github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect @@ -73,7 +73,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect 
go.opentelemetry.io/otel v1.24.0 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect diff --git a/tools/compatibility-check/go.sum b/tools/compatibility-check/go.sum index 3c453fa707..4ab363961b 100644 --- a/tools/compatibility-check/go.sum +++ b/tools/compatibility-check/go.sum @@ -278,7 +278,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -343,8 +342,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0= -github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf h1:MDB/hdwr5GMsZHIIrAw3gBnmWBy5XjsZ4/6kftv9d5c= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf/go.mod h1:U8PGG42VrSJqjdfJ9NGQ2fenkyFRYlgtfHsZM61H4zY= github.com/onflow/crypto v0.25.2 h1:GjHunqVt+vPcdqhxxhAXiMIF3YiLX7gTuTR5O+VG2ns= github.com/onflow/crypto v0.25.2/go.mod h1:fY7eLqUdMKV8EGOw301unP8h7PvLVy8/6gVR++/g0BY= github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 h1:R86HaOuk6vpuECZnriEUE7bw9inC2AtdSn8lL/iwQLQ= @@ -480,11 +479,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=