diff --git a/cmd/decode-state-values/main.go b/cmd/decode-state-values/main.go
index 99eeda2497..91f3a6d840 100644
--- a/cmd/decode-state-values/main.go
+++ b/cmd/decode-state-values/main.go
@@ -234,8 +234,13 @@ type interpreterStorage struct {
 
 var _ interpreter.Storage = &interpreterStorage{}
 
-func (i interpreterStorage) GetStorageMap(_ common.Address, _ common.StorageDomain, _ bool) *interpreter.StorageMap {
-	panic("unexpected GetStorageMap call")
+func (i interpreterStorage) GetDomainStorageMap(
+	_ *interpreter.Interpreter,
+	_ common.Address,
+	_ common.StorageDomain,
+	_ bool,
+) *interpreter.DomainStorageMap {
+	panic("unexpected GetDomainStorageMap call")
 }
 
 func (i interpreterStorage) CheckHealth() error {
diff --git a/common/address.go b/common/address.go
index 5c1a354f59..04c2bade70 100644
--- a/common/address.go
+++ b/common/address.go
@@ -19,6 +19,7 @@
 package common
 
 import (
+	"bytes"
 	"encoding/hex"
 	goErrors "errors"
 	"fmt"
@@ -112,6 +113,10 @@ func (a Address) HexWithPrefix() string {
 	return fmt.Sprintf("0x%x", [AddressLength]byte(a))
 }
 
+func (a Address) Compare(other Address) int {
+	return bytes.Compare(a[:], other[:])
+}
+
 // HexToAddress converts a hex string to an Address after
 // ensuring that the hex string starts with the prefix 0x.
 func HexToAddressAssertPrefix(h string) (Address, error) {
diff --git a/common/storagedomain.go b/common/storagedomain.go
index 108196cdac..ac7b297daa 100644
--- a/common/storagedomain.go
+++ b/common/storagedomain.go
@@ -24,6 +24,19 @@ import (
 	"github.com/onflow/cadence/errors"
 )
 
+// StorageDomain is used to store domain values on chain.
+//
+// !!! *WARNING* !!!
+//
+// Only add new StorageDomain by:
+// - appending to the end.
+//
+// Only remove StorageDomain by:
+// - replacing existing StorageDomain with a placeholder `_`.
+//
+// DO *NOT* REPLACE EXISTING STORAGEDOMAIN!
+// DO *NOT* REMOVE EXISTING STORAGEDOMAIN!
+// DO *NOT* INSERT NEW STORAGEDOMAIN IN BETWEEN!
 type StorageDomain uint8
 
 const (
@@ -54,6 +67,8 @@ const (
 	// StorageDomainAccountCapability is the storage domain which
 	// records active account capability controller IDs
 	StorageDomainAccountCapability
+
+	// Append new StorageDomain here (if needed).
 )
 
 var AllStorageDomains = []StorageDomain{
diff --git a/go.mod b/go.mod
index 6193bce06e..3412ef864a 100644
--- a/go.mod
+++ b/go.mod
@@ -13,10 +13,10 @@ require (
 	github.com/kr/pretty v0.3.1
 	github.com/leanovate/gopter v0.2.9
 	github.com/logrusorgru/aurora/v4 v4.0.0
-	github.com/onflow/atree v0.8.0
+	github.com/onflow/atree v0.8.1
 	github.com/rivo/uniseg v0.4.4
 	github.com/schollz/progressbar/v3 v3.13.1
-	github.com/stretchr/testify v1.9.0
+	github.com/stretchr/testify v1.10.0
 	github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c
 	github.com/tidwall/pretty v1.2.1
 	github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d
@@ -54,7 +54,7 @@ require (
 	github.com/rogpeppe/go-internal v1.9.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/zeebo/assert v1.3.0 // indirect
-	github.com/zeebo/blake3 v0.2.3 // indirect
+	github.com/zeebo/blake3 v0.2.4 // indirect
 	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
 	golang.org/x/sync v0.8.0 // indirect
 	golang.org/x/sys v0.26.0 // indirect
diff --git a/go.sum b/go.sum
index cc2c3058d0..4cbe6e714d 100644
--- a/go.sum
+++ b/go.sum
@@ -37,7 +37,6 @@ github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd
 github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
 github.com/k0kubun/pp/v3 v3.2.0 h1:h33hNTZ9nVFNP3u2Fsgz8JXiF5JINoZfFq4SvKJwNcs=
 github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapdz1EwA=
-github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
 github.com/klauspost/cpuid/v2 v2.2.0 h1:4ZexSFt8agMNzNisrsilL6RClWDC5YJnLHNIfTy4iuc=
 github.com/klauspost/cpuid/v2 v2.2.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
 github.com/kodova/html-to-markdown v1.0.1 h1:MJxQAnqxtss3DaPnm72DRV65HZiMQZF3DUAfEaTg+14=
@@ -75,8 +74,8 @@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2Em
 github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0=
-github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo=
+github.com/onflow/atree v0.8.1 h1:DAnPnL9/Ks3LaAnkQVokokTBG/znTW0DJfovDtJDhLI=
+github.com/onflow/atree v0.8.1/go.mod h1:FT6udJF9Q7VQTu3wknDhFX+VV4D44ZGdqtTAE5iztck=
 github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg=
 github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -98,8 +97,8 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
 github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg=
 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo=
 github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
@@ -108,11 +107,10 @@ github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d h1:5JInRQbk5UBX
 github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d/go.mod h1:Nlx5Y115XQvNcIdIy7dZXaNSUpzwBSge4/Ivk93/Yog=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
-github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
 github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
 github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
-github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
-github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
 github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
 github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
 go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg=
diff --git a/interpreter/account_storagemap.go b/interpreter/account_storagemap.go
new file mode 100644
index 0000000000..25e1d881a1
--- /dev/null
+++ b/interpreter/account_storagemap.go
@@ -0,0 +1,346 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package interpreter
+
+import (
+	goerrors "errors"
+
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/errors"
+)
+
+// AccountStorageMap stores domain storage maps in an account.
+type AccountStorageMap struct {
+	orderedMap *atree.OrderedMap
+}
+
+// NewAccountStorageMap creates account storage map.
+func NewAccountStorageMap(
+	memoryGauge common.MemoryGauge,
+	storage atree.SlabStorage,
+	address atree.Address,
+) *AccountStorageMap {
+	common.UseMemory(memoryGauge, common.StorageMapMemoryUsage)
+
+	orderedMap, err := atree.NewMap(
+		storage,
+		address,
+		atree.NewDefaultDigesterBuilder(),
+		emptyTypeInfo,
+	)
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	return &AccountStorageMap{
+		orderedMap: orderedMap,
+	}
+}
+
+// NewAccountStorageMapWithRootID loads existing account storage map with given atree SlabID.
+func NewAccountStorageMapWithRootID(
+	storage atree.SlabStorage,
+	slabID atree.SlabID,
+) *AccountStorageMap {
+	orderedMap, err := atree.NewMapWithRootID(
+		storage,
+		slabID,
+		atree.NewDefaultDigesterBuilder(),
+	)
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	return &AccountStorageMap{
+		orderedMap: orderedMap,
+	}
+}
+
+// DomainExists returns true if the given domain exists in the account storage map.
+func (s *AccountStorageMap) DomainExists(domain common.StorageDomain) bool {
+	key := Uint64StorageMapKey(domain)
+
+	exists, err := s.orderedMap.Has(
+		key.AtreeValueCompare,
+		key.AtreeValueHashInput,
+		key.AtreeValue(),
+	)
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	return exists
+}
+
+// GetDomain returns domain storage map for the given domain.
+// If createIfNotExists is true and domain doesn't exist, new domain storage map
+// is created and inserted into account storage map with given domain as key.
+func (s *AccountStorageMap) GetDomain(
+	gauge common.MemoryGauge,
+	interpreter *Interpreter,
+	domain common.StorageDomain,
+	createIfNotExists bool,
+) *DomainStorageMap {
+	key := Uint64StorageMapKey(domain)
+
+	storedValue, err := s.orderedMap.Get(
+		key.AtreeValueCompare,
+		key.AtreeValueHashInput,
+		key.AtreeValue(),
+	)
+	if err != nil {
+		var keyNotFoundError *atree.KeyNotFoundError
+		if goerrors.As(err, &keyNotFoundError) {
+			// Create domain storage map if needed.
+
+			if createIfNotExists {
+				return s.NewDomain(gauge, interpreter, domain)
+			}
+
+			return nil
+		}
+
+		panic(errors.NewExternalError(err))
+	}
+
+	// Create domain storage map from raw atree value.
+	return NewDomainStorageMapWithAtreeValue(storedValue)
+}
+
+// NewDomain creates new domain storage map and inserts it to AccountStorageMap with given domain as key.
+func (s *AccountStorageMap) NewDomain(
+	gauge common.MemoryGauge,
+	interpreter *Interpreter,
+	domain common.StorageDomain,
+) *DomainStorageMap {
+	interpreter.recordStorageMutation()
+
+	domainStorageMap := NewDomainStorageMap(gauge, s.orderedMap.Storage, s.orderedMap.Address())
+
+	key := Uint64StorageMapKey(domain)
+
+	existingStorable, err := s.orderedMap.Set(
+		key.AtreeValueCompare,
+		key.AtreeValueHashInput,
+		key.AtreeValue(),
+		domainStorageMap.orderedMap,
+	)
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+	if existingStorable != nil {
+		panic(errors.NewUnexpectedError(
+			"account %x domain %s should not exist",
+			s.orderedMap.Address(),
+			domain.Identifier(),
+		))
+	}
+
+	return domainStorageMap
+}
+
+// WriteDomain sets or removes domain storage map in account storage map.
+// If the given storage map is nil, domain is removed.
+// If the given storage map is non-nil, domain is added/updated.
+// Returns true if domain storage map previously existed at the given domain.
+func (s *AccountStorageMap) WriteDomain(
+	interpreter *Interpreter,
+	domain common.StorageDomain,
+	domainStorageMap *DomainStorageMap,
+) (existed bool) {
+	if domainStorageMap == nil {
+		return s.removeDomain(interpreter, domain)
+	}
+	return s.setDomain(interpreter, domain, domainStorageMap)
+}
+
+// setDomain sets domain storage map in the account storage map and returns true if domain previously existed.
+// If the given domain already stores a domain storage map, it is overwritten.
+func (s *AccountStorageMap) setDomain(
+	interpreter *Interpreter,
+	domain common.StorageDomain,
+	newDomainStorageMap *DomainStorageMap,
+) (existed bool) {
+	interpreter.recordStorageMutation()
+
+	key := Uint64StorageMapKey(domain)
+
+	existingValueStorable, err := s.orderedMap.Set(
+		key.AtreeValueCompare,
+		key.AtreeValueHashInput,
+		key.AtreeValue(),
+		newDomainStorageMap.orderedMap,
+	)
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	existed = existingValueStorable != nil
+	if existed {
+		// Create domain storage map from overwritten storable
+		existingDomainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable)
+
+		// Deep remove elements in domain storage map
+		existingDomainStorageMap.DeepRemove(interpreter, true)
+
+		// Remove domain storage map slab
+		interpreter.RemoveReferencedSlab(existingValueStorable)
+	}
+
+	interpreter.maybeValidateAtreeValue(s.orderedMap)
+
+	// NOTE: Don't call maybeValidateAtreeStorage() here because it is possible
+	// that domain storage map is in the process of being migrated to account
+	// storage map and state isn't consistent during migration.
+
+	return
+}
+
+// removeDomain removes domain storage map with given domain in account storage map, if it exists.
+func (s *AccountStorageMap) removeDomain(interpreter *Interpreter, domain common.StorageDomain) (existed bool) {
+	interpreter.recordStorageMutation()
+
+	key := Uint64StorageMapKey(domain)
+
+	existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove(
+		key.AtreeValueCompare,
+		key.AtreeValueHashInput,
+		key.AtreeValue(),
+	)
+	if err != nil {
+		var keyNotFoundError *atree.KeyNotFoundError
+		if goerrors.As(err, &keyNotFoundError) {
+			// No-op to remove non-existent domain.
+			return
+		}
+		panic(errors.NewExternalError(err))
+	}
+
+	// Key
+
+	// NOTE: The key is just an atree.Value (Uint64AtreeValue), not an interpreter.Value,
+	// so it does not need to be converted and does not need to be deep-removed.
+	interpreter.RemoveReferencedSlab(existingKeyStorable)
+
+	// Value
+
+	existed = existingValueStorable != nil
+	if existed {
+		// Create domain storage map from removed storable
+		domainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable)
+
+		// Deep remove elements in domain storage map
+		domainStorageMap.DeepRemove(interpreter, true)
+
+		// Remove domain storage map slab
+		interpreter.RemoveReferencedSlab(existingValueStorable)
+	}
+
+	interpreter.maybeValidateAtreeValue(s.orderedMap)
+	interpreter.maybeValidateAtreeStorage()
+
+	return
+}
+
+func (s *AccountStorageMap) SlabID() atree.SlabID {
+	return s.orderedMap.SlabID()
+}
+
+func (s *AccountStorageMap) Count() uint64 {
+	return s.orderedMap.Count()
+}
+
+// Domains returns a set of domains in account storage map
+func (s *AccountStorageMap) Domains() map[common.StorageDomain]struct{} {
+	domains := make(map[common.StorageDomain]struct{})
+
+	iterator := s.Iterator()
+
+	for {
+		k, err := iterator.mapIterator.NextKey()
+		if err != nil {
+			panic(errors.NewExternalError(err))
+		}
+
+		if k == nil {
+			break
+		}
+
+		domain := convertAccountStorageMapKeyToStorageDomain(k)
+		domains[domain] = struct{}{}
+	}
+
+	return domains
+}
+
+// Iterator returns a mutable iterator (AccountStorageMapIterator),
+// which allows iterating over the domain and domain storage map.
+func (s *AccountStorageMap) Iterator() *AccountStorageMapIterator {
+	mapIterator, err := s.orderedMap.Iterator(
+		StorageMapKeyAtreeValueComparator,
+		StorageMapKeyAtreeValueHashInput,
+	)
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	return &AccountStorageMapIterator{
+		mapIterator: mapIterator,
+		storage:     s.orderedMap.Storage,
+	}
+}
+
+// AccountStorageMapIterator is an iterator over AccountStorageMap.
+type AccountStorageMapIterator struct {
+	mapIterator atree.MapIterator
+	storage     atree.SlabStorage
+}
+
+// Next returns the next domain and domain storage map.
+// If there is no more domain, (common.StorageDomainUnknown, nil) is returned.
+func (i *AccountStorageMapIterator) Next() (common.StorageDomain, *DomainStorageMap) {
+	k, v, err := i.mapIterator.Next()
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	if k == nil || v == nil {
+		return common.StorageDomainUnknown, nil
+	}
+
+	key := convertAccountStorageMapKeyToStorageDomain(k)
+
+	value := NewDomainStorageMapWithAtreeValue(v)
+
+	return key, value
+}
+
+func convertAccountStorageMapKeyToStorageDomain(v atree.Value) common.StorageDomain {
+	key, ok := v.(Uint64AtreeValue)
+	if !ok {
+		panic(errors.NewUnexpectedError("domain key type %T isn't expected", key))
+	}
+	domain, err := common.StorageDomainFromUint64(uint64(key))
+	if err != nil {
+		panic(errors.NewUnexpectedError("domain key %d isn't expected: %w", key, err))
+	}
+	return domain
+}
diff --git a/interpreter/account_storagemap_test.go b/interpreter/account_storagemap_test.go
new file mode 100644
index 0000000000..a2939e14ac
--- /dev/null
+++ b/interpreter/account_storagemap_test.go
@@ -0,0 +1,942 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package interpreter_test
+
+import (
+	"math/rand"
+	goruntime "runtime"
+	"slices"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/interpreter"
+	"github.com/onflow/cadence/runtime"
+	. "github.com/onflow/cadence/test_utils/interpreter_utils"
+	. "github.com/onflow/cadence/test_utils/runtime_utils"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestAccountStorageMapDomainExists(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, accountStorageMap)
+		require.Equal(t, uint64(0), accountStorageMap.Count())
+
+		for _, domain := range common.AllStorageDomains {
+			exist := accountStorageMap.DomainExists(domain)
+			require.False(t, exist)
+		}
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+
+		const count = 10
+		accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+		// Check if domain exists
+		for _, domain := range common.AllStorageDomains {
+			exist := accountStorageMap.DomainExists(domain)
+			require.Equal(t, slices.Contains(existingDomains, domain), exist)
+		}
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+}
+
+func TestAccountStorageMapGetDomain(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, accountStorageMap)
+		require.Equal(t, uint64(0), accountStorageMap.Count())
+
+		for _, domain := range common.AllStorageDomains {
+			const createIfNotExists = false
+			domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists)
+			require.Nil(t, domainStorageMap)
+		}
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+
+		const count = 10
+		accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+		for _, domain := range common.AllStorageDomains {
+			const createIfNotExists = false
+			domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists)
+			require.Equal(t, slices.Contains(existingDomains, domain), domainStorageMap != nil)
+
+			if domainStorageMap != nil {
+				checkDomainStorageMapData(t, inter, domainStorageMap, accountValues[domain])
+			}
+		}
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+}
+
+func TestAccountStorageMapCreateDomain(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		accountValues := make(accountStorageMapValues)
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, accountStorageMap)
+		require.Equal(t, uint64(0), accountStorageMap.Count())
+
+		accountStorageMapRootSlabID := accountStorageMap.SlabID()
+
+		for _, domain := range common.AllStorageDomains {
+			const createIfNotExists = true
+			domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists)
+			require.NotNil(t, domainStorageMap)
+			require.Equal(t, uint64(0), domainStorageMap.Count())
+
+			accountValues[domain] = make(domainStorageMapValues)
+		}
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID})
+
+		err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+
+		const count = 10
+		accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+		accountStorageMapRootSlabID := accountStorageMap.SlabID()
+
+		for _, domain := range common.AllStorageDomains {
+			const createIfNotExists = true
+			domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists)
+			require.NotNil(t, domainStorageMap)
+			require.Equal(t, uint64(len(accountValues[domain])), domainStorageMap.Count())
+
+			if !slices.Contains(existingDomains, domain) {
+				accountValues[domain] = make(domainStorageMapValues)
+			}
+		}
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID})
+
+		err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+}
+
+func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		accountValues := make(accountStorageMapValues)
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, accountStorageMap)
+		require.Equal(t, uint64(0), accountStorageMap.Count())
+
+		accountStorageMapRootSlabID := accountStorageMap.SlabID()
+
+		const count = 10
+		for _, domain := range common.AllStorageDomains {
+
+			domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+			domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random)
+
+			existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap)
+			require.False(t, existed)
+
+			accountValues[domain] = domainValues
+		}
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID})
+
+		err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+
+		const count = 10
+		accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+		accountStorageMapRootSlabID := accountStorageMap.SlabID()
+
+		for _, domain := range common.AllStorageDomains {
+
+			domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+			domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random)
+
+			existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap)
+			require.Equal(t, slices.Contains(existingDomains, domain), existed)
+
+			accountValues[domain] = domainValues
+		}
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID})
+
+		err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+}
+
+func TestAccountStorageMapRemoveDomain(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		accountValues := make(accountStorageMapValues)
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, accountStorageMap)
+		require.Equal(t, uint64(0), accountStorageMap.Count())
+
+		accountStorageMapRootSlabID := accountStorageMap.SlabID()
+
+		for _, domain := range common.AllStorageDomains {
+			existed := accountStorageMap.WriteDomain(inter, domain, nil)
+			require.False(t, existed)
+		}
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID})
+
+		err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+
+		const count = 10
+		accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+		accountStorageMapRootSlabID := accountStorageMap.SlabID()
+
+		for _, domain := range common.AllStorageDomains {
+
+			existed := accountStorageMap.WriteDomain(inter, domain, nil)
+			require.Equal(t, slices.Contains(existingDomains, domain), existed)
+
+			delete(accountValues, domain)
+		}
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID})
+
+		err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+}
+
+func TestAccountStorageMapIterator(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		accountValues := make(accountStorageMapValues)
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, accountStorageMap)
+		require.Equal(t, uint64(0), accountStorageMap.Count())
+
+		iterator := accountStorageMap.Iterator()
+
+		// Test calling Next() twice on empty account storage map.
+		for range 2 {
+			domain, domainStorageMap := iterator.Next()
+			require.Empty(t, domain)
+			require.Nil(t, domainStorageMap)
+		}
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		existingDomains := []common.StorageDomain{
+			common.PathDomainStorage.StorageDomain(),
+			common.PathDomainPublic.StorageDomain(),
+		}
+
+		const count = 10
+		accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+		iterator := accountStorageMap.Iterator()
+
+		domainCount := 0
+		for {
+			domain, domainStorageMap := iterator.Next()
+			if domain == common.StorageDomainUnknown {
+				break
+			}
+
+			domainCount++
+
+			require.True(t, slices.Contains(existingDomains, domain))
+			require.NotNil(t, domainStorageMap)
+
+			checkDomainStorageMapData(t, inter, domainStorageMap, accountValues[domain])
+		}
+
+		// Test calling Next() after iterator reaches the end.
+		domain, domainStorageMap := iterator.Next()
+		require.Equal(t, common.StorageDomainUnknown, domain)
+		require.Nil(t, domainStorageMap)
+
+		require.Equal(t, len(existingDomains), domainCount)
+
+		checkAccountStorageMapData(t, inter, accountStorageMap, accountValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+}
+
+func TestAccountStorageMapDomains(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, accountStorageMap)
+		require.Equal(t, uint64(0), accountStorageMap.Count())
+
+		domains := accountStorageMap.Domains()
+		require.Equal(t, 0, len(domains))
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		// Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly.
+		// This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled)
+
+		existingDomains := []common.StorageDomain{
+			common.PathDomainStorage.StorageDomain(),
+			common.PathDomainPublic.StorageDomain(),
+			common.PathDomainPrivate.StorageDomain(),
+		}
+
+		const count = 10
+		accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+		domains := accountStorageMap.Domains()
+		require.Equal(t, len(existingDomains), len(domains))
+
+		for _, domain := range existingDomains {
+			_, exist := domains[domain]
+			require.True(t, exist)
+		}
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()})
+	})
+}
+
+func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) {
+			ledger := NewTestLedger(nil, nil)
+			storage := runtime.NewStorage(
+				ledger,
+				nil,
+				runtime.StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+			require.NotNil(t, accountStorageMap)
+			require.Equal(t, uint64(0), accountStorageMap.Count())
+
+			err := storage.Commit(inter, false)
+			require.NoError(t, err)
+
+			return accountStorageMap.SlabID(), make(accountStorageMapValues), ledger.StoredValues, ledger.StorageIndices
+		}
+
+		accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init()
+
+		checkAccountStorageMapDataWithRawData(t, storedValues, storageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		existingDomains := []common.StorageDomain{
+			common.PathDomainStorage.StorageDomain(),
+			common.PathDomainPublic.StorageDomain(),
+			common.PathDomainPrivate.StorageDomain(),
+		}
+
+		init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) {
+			random := rand.New(rand.NewSource(42))
+
+			ledger := NewTestLedger(nil, nil)
+			storage := runtime.NewStorage(
+				ledger,
+				nil,
+				runtime.StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			// Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly.
+			// This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab.
+			const atreeValueValidationEnabled = true
+			const atreeStorageValidationEnabled = false
+			inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled)
+
+			const count = 10
+			accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random)
+
+			err := storage.Commit(inter, false)
+			require.NoError(t, err)
+
+			return accountStorageMap.SlabID(), accountValues, ledger.StoredValues, ledger.StorageIndices
+		}
+
+		accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init()
+
+		checkAccountStorageMapDataWithRawData(t, storedValues, storageIndices, accountStorageMapRootSlabID, accountValues)
+	})
+}
+
+type (
+	domainStorageMapValues  map[interpreter.StorageMapKey]interpreter.Value
+	accountStorageMapValues map[common.StorageDomain]domainStorageMapValues
+)
+
+func createAccountStorageMap(
+	storage atree.SlabStorage,
+	inter *interpreter.Interpreter,
+	address common.Address,
+	domains []common.StorageDomain,
+	count int,
+	random *rand.Rand,
+) (*interpreter.AccountStorageMap, accountStorageMapValues) {
+
+	// Create account storage map
+	accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+
+	accountValues := make(accountStorageMapValues)
+
+	for _, domain := range domains {
+		// Create domain storage map
+		domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain)
+
+		// Write to new domain storage map
+		domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random)
+
+		accountValues[domain] = domainValues
+	}
+
+	return accountStorageMap, accountValues
+}
+
+func writeRandomValuesToDomainStorageMap(
+	inter *interpreter.Interpreter,
+	domainStorageMap *interpreter.DomainStorageMap,
+	count int,
+	random *rand.Rand,
+) domainStorageMapValues {
+
+	domainValues := make(domainStorageMapValues)
+
+	for len(domainValues) < count {
+		n := random.Int()
+
+		key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+
+		var value interpreter.Value
+
+		if len(domainValues) == 0 {
+			// First element is a large value that is stored in its own slabs.
+			value = interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000))
+		} else {
+			value = interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+		}
+
+		domainStorageMap.WriteValue(inter, key, value)
+
+		domainValues[key] = value
+	}
+
+	return domainValues
+}
+
+// checkAccountStorageMapDataWithRawData checks loaded account storage map against expected account values.
+func checkAccountStorageMapDataWithRawData(
+	tb testing.TB,
+	storedValues map[string][]byte,
+	storageIndices map[string]uint64,
+	rootSlabID atree.SlabID,
+	expectedAccountValues accountStorageMapValues,
+) {
+	// Create new storage from raw data
+	ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices)
+	storage := runtime.NewStorage(
+		ledger,
+		nil,
+		runtime.StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
+
+	inter := NewTestInterpreterWithStorage(tb, storage)
+
+	loadedAccountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, rootSlabID)
+	require.Equal(tb, uint64(len(expectedAccountValues)), loadedAccountStorageMap.Count())
+	require.Equal(tb, rootSlabID, loadedAccountStorageMap.SlabID())
+
+	checkAccountStorageMapData(tb, inter, loadedAccountStorageMap, expectedAccountValues)
+
+	CheckAtreeStorageHealth(tb, storage, []atree.SlabID{rootSlabID})
+}
+
+// checkAccountStorageMapData iterates account storage map and compares values with given expectedAccountValues.
+func checkAccountStorageMapData(
+	tb testing.TB,
+	inter *interpreter.Interpreter,
+	accountStorageMap *interpreter.AccountStorageMap,
+	expectedAccountValues accountStorageMapValues,
+) {
+	require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count())
+
+	domainCount := 0
+	iter := accountStorageMap.Iterator()
+	for {
+		domain, domainStorageMap := iter.Next()
+		if domain == common.StorageDomainUnknown {
+			break
+		}
+
+		domainCount++
+
+		expectedDomainValues, exist := expectedAccountValues[domain]
+		require.True(tb, exist)
+
+		checkDomainStorageMapData(tb, inter, domainStorageMap, expectedDomainValues)
+	}
+
+	require.Equal(tb, len(expectedAccountValues), domainCount)
+}
+
+// checkDomainStorageMapData iterates domain storage map and compares values with given expectedDomainValues.
+func checkDomainStorageMapData(
+	tb testing.TB,
+	inter *interpreter.Interpreter,
+	domainStorageMap *interpreter.DomainStorageMap,
+	expectedDomainValues domainStorageMapValues,
+) {
+	require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count())
+
+	count := 0
+	iter := domainStorageMap.Iterator(nil)
+	for {
+		k, v := iter.Next()
+		if k == nil {
+			break
+		}
+
+		count++
+
+		kv := k.(interpreter.StringAtreeValue)
+
+		expectedValue := expectedDomainValues[interpreter.StringStorageMapKey(kv)]
+
+		checkCadenceValue(tb, inter, v, expectedValue)
+	}
+
+	require.Equal(tb, len(expectedDomainValues), count)
+}
+
+func checkCadenceValue(
+	tb testing.TB,
+	inter *interpreter.Interpreter,
+	value,
+	expectedValue interpreter.Value,
+) {
+	ev, ok := value.(interpreter.EquatableValue)
+	require.True(tb, ok)
+	require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+}
diff --git a/interpreter/account_test.go b/interpreter/account_test.go
index 595d0d7352..8f1f9c4214 100644
--- a/interpreter/account_test.go
+++ b/interpreter/account_test.go
@@ -483,7 +483,7 @@ func testAccountWithErrorHandler(
 	getAccountValues := func() map[storageKey]interpreter.Value {
 		accountValues := make(map[storageKey]interpreter.Value)
 
-		for storageMapKey, accountStorage := range inter.Storage().(interpreter.InMemoryStorage).StorageMaps {
+		for storageMapKey, accountStorage := range inter.Storage().(interpreter.InMemoryStorage).DomainStorageMaps {
 			iterator := accountStorage.Iterator(inter)
 			for {
 				key, value := iterator.Next()
diff --git a/interpreter/storagemap.go b/interpreter/domain_storagemap.go
similarity index 53%
rename from interpreter/storagemap.go
rename to interpreter/domain_storagemap.go
index 54c9e2acbb..e8f79bca35 100644
--- a/interpreter/storagemap.go
+++ b/interpreter/domain_storagemap.go
@@ -20,6 +20,7 @@ package interpreter
 
 import (
 	goerrors "errors"
+	"time"
 
 	"github.com/onflow/atree"
 
@@ -27,12 +28,13 @@ import (
 	"github.com/onflow/cadence/errors"
 )
 
-// StorageMap is an ordered map which stores values in an account.
-type StorageMap struct {
+// DomainStorageMap is an ordered map which stores values in an account domain.
+type DomainStorageMap struct {
 	orderedMap *atree.OrderedMap
 }
 
-func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *StorageMap {
+// NewDomainStorageMap creates new domain storage map for given address.
+func NewDomainStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *DomainStorageMap {
 	common.UseMemory(memoryGauge, common.StorageMapMemoryUsage)
 
 	orderedMap, err := atree.NewMap(
@@ -45,12 +47,16 @@ func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, ad
 		panic(errors.NewExternalError(err))
 	}
 
-	return &StorageMap{
+	return &DomainStorageMap{
 		orderedMap: orderedMap,
 	}
 }
 
-func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *StorageMap {
+// NewDomainStorageMapWithRootID loads domain storage map with given slabID.
+// This function is only used with legacy domain registers for unmigrated accounts.
+// For migrated accounts, NewDomainStorageMapWithAtreeValue() is used to load
+// domain storage map as an element of AccountStorageMap.
+func NewDomainStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *DomainStorageMap {
 	orderedMap, err := atree.NewMapWithRootID(
 		storage,
 		slabID,
@@ -60,13 +66,51 @@ func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *St
 		panic(errors.NewExternalError(err))
 	}
 
-	return &StorageMap{
+	return &DomainStorageMap{
 		orderedMap: orderedMap,
 	}
 }
 
+// newDomainStorageMapWithAtreeStorable loads domain storage map with given atree.Storable.
+func newDomainStorageMapWithAtreeStorable(storage atree.SlabStorage, storable atree.Storable) *DomainStorageMap {
+
+	// NOTE: Don't use interpreter.StoredValue() to convert given storable
+	// to DomainStorageMap because DomainStorageMap isn't interpreter.Value.
+
+	value, err := storable.StoredValue(storage)
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	return NewDomainStorageMapWithAtreeValue(value)
+}
+
+// NewDomainStorageMapWithAtreeValue loads domain storage map with given atree.Value.
+// This function is used by migrated account to load domain as an element of AccountStorageMap.
+func NewDomainStorageMapWithAtreeValue(value atree.Value) *DomainStorageMap {
+	// Check if type of given value is *atree.OrderedMap
+	dm, isAtreeOrderedMap := value.(*atree.OrderedMap)
+	if !isAtreeOrderedMap {
+		panic(errors.NewUnexpectedError(
+			"domain storage map has unexpected type %T, expect *atree.OrderedMap",
+			value,
+		))
+	}
+
+	// Check if TypeInfo of atree.OrderedMap is EmptyTypeInfo
+	dt, isEmptyTypeInfo := dm.Type().(EmptyTypeInfo)
+	if !isEmptyTypeInfo {
+		panic(errors.NewUnexpectedError(
+			"domain storage map has unexpected encoded type %T, expect EmptyTypeInfo",
+			dt,
+		))
+	}
+
+	return &DomainStorageMap{orderedMap: dm}
+}
+
 // ValueExists returns true if the given key exists in the storage map.
-func (s StorageMap) ValueExists(key StorageMapKey) bool {
+func (s *DomainStorageMap) ValueExists(key StorageMapKey) bool {
 	exists, err := s.orderedMap.Has(
 		key.AtreeValueCompare,
 		key.AtreeValueHashInput,
@@ -81,7 +125,7 @@ func (s StorageMap) ValueExists(key StorageMapKey) bool {
 
 // ReadValue returns the value for the given key.
 // Returns nil if the key does not exist.
-func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value {
+func (s *DomainStorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value {
 	storedValue, err := s.orderedMap.Get(
 		key.AtreeValueCompare,
 		key.AtreeValueHashInput,
@@ -102,7 +146,7 @@ func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value
 // If the given value is nil, the key is removed.
 // If the given value is non-nil, the key is added/updated.
 // Returns true if a value previously existed at the given key.
-func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) {
+func (s *DomainStorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) {
 	if value == nil {
 		return s.RemoveValue(interpreter, key)
 	} else {
@@ -112,8 +156,8 @@ func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, valu
 
 // SetValue sets a value in the storage map.
 // If the given key already stores a value, it is overwritten.
-// Returns true if
-func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) {
+// Returns true if given key already exists and existing value is overwritten.
+func (s *DomainStorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) {
 	interpreter.recordStorageMutation()
 
 	existingStorable, err := s.orderedMap.Set(
@@ -126,20 +170,21 @@ func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value
 		panic(errors.NewExternalError(err))
 	}
 
-	interpreter.maybeValidateAtreeValue(s.orderedMap)
-	interpreter.maybeValidateAtreeStorage()
-
 	existed = existingStorable != nil
 	if existed {
 		existingValue := StoredValue(interpreter, existingStorable, interpreter.Storage())
 		existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was overwritten in parent container.
 		interpreter.RemoveReferencedSlab(existingStorable)
 	}
+
+	interpreter.maybeValidateAtreeValue(s.orderedMap)
+	interpreter.maybeValidateAtreeStorage()
+
 	return
 }
 
 // RemoveValue removes a value in the storage map, if it exists.
-func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) {
+func (s *DomainStorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) {
 	interpreter.recordStorageMutation()
 
 	existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove(
@@ -155,9 +200,6 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex
 		panic(errors.NewExternalError(err))
 	}
 
-	interpreter.maybeValidateAtreeValue(s.orderedMap)
-	interpreter.maybeValidateAtreeStorage()
-
 	// Key
 
 	// NOTE: Key is just an atree.Value, not an interpreter.Value,
@@ -172,12 +214,82 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex
 		existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was removed from parent container.
 		interpreter.RemoveReferencedSlab(existingValueStorable)
 	}
+
+	interpreter.maybeValidateAtreeValue(s.orderedMap)
+	interpreter.maybeValidateAtreeStorage()
+
 	return
 }
 
+// DeepRemove removes all elements (and their slabs) of domain storage map.
+func (s *DomainStorageMap) DeepRemove(interpreter *Interpreter, hasNoParentContainer bool) {
+
+	config := interpreter.SharedState.Config
+
+	if config.TracingEnabled {
+		startTime := time.Now()
+
+		typeInfo := "DomainStorageMap"
+		count := s.Count()
+
+		defer func() {
+			interpreter.reportDomainStorageMapDeepRemoveTrace(
+				typeInfo,
+				int(count),
+				time.Since(startTime),
+			)
+		}()
+	}
+
+	// Remove nested values and storables
+
+	// Remove keys and values
+
+	storage := s.orderedMap.Storage
+
+	err := s.orderedMap.PopIterate(func(keyStorable atree.Storable, valueStorable atree.Storable) {
+		// Key
+
+		// NOTE: Key is just an atree.Value, not an interpreter.Value,
+		// so do not need (can) convert and not need to deep remove
+		interpreter.RemoveReferencedSlab(keyStorable)
+
+		// Value
+
+		value := StoredValue(interpreter, valueStorable, storage)
+		value.DeepRemove(interpreter, false) // value is an element of s.orderedMap because it is from PopIterate() callback.
+		interpreter.RemoveReferencedSlab(valueStorable)
+	})
+	if err != nil {
+		panic(errors.NewExternalError(err))
+	}
+
+	interpreter.maybeValidateAtreeValue(s.orderedMap)
+	if hasNoParentContainer {
+		interpreter.maybeValidateAtreeStorage()
+	}
+}
+
+func (s *DomainStorageMap) SlabID() atree.SlabID {
+	return s.orderedMap.SlabID()
+}
+
+func (s *DomainStorageMap) ValueID() atree.ValueID {
+	return s.orderedMap.ValueID()
+}
+
+func (s *DomainStorageMap) Count() uint64 {
+	return s.orderedMap.Count()
+}
+
+func (s *DomainStorageMap) Inlined() bool {
+	// This is only used for testing currently.
+	return s.orderedMap.Inlined()
+}
+
 // Iterator returns an iterator (StorageMapIterator),
 // which allows iterating over the keys and values of the storage map
-func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator {
+func (s *DomainStorageMap) Iterator(gauge common.MemoryGauge) DomainStorageMapIterator {
 	mapIterator, err := s.orderedMap.Iterator(
 		StorageMapKeyAtreeValueComparator,
 		StorageMapKeyAtreeValueHashInput,
@@ -186,31 +298,23 @@ func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator {
 		panic(errors.NewExternalError(err))
 	}
 
-	return StorageMapIterator{
+	return DomainStorageMapIterator{
 		gauge:       gauge,
 		mapIterator: mapIterator,
 		storage:     s.orderedMap.Storage,
 	}
 }
 
-func (s StorageMap) SlabID() atree.SlabID {
-	return s.orderedMap.SlabID()
-}
-
-func (s StorageMap) Count() uint64 {
-	return s.orderedMap.Count()
-}
-
-// StorageMapIterator is an iterator over StorageMap
-type StorageMapIterator struct {
+// DomainStorageMapIterator is an iterator over DomainStorageMap
+type DomainStorageMapIterator struct {
 	gauge       common.MemoryGauge
 	mapIterator atree.MapIterator
 	storage     atree.SlabStorage
 }
 
 // Next returns the next key and value of the storage map iterator.
-// If there is no further key-value pair, ("", nil) is returned.
-func (i StorageMapIterator) Next() (atree.Value, Value) {
+// If there is no further key-value pair, (nil, nil) is returned.
+func (i DomainStorageMapIterator) Next() (atree.Value, Value) {
 	k, v, err := i.mapIterator.Next()
 	if err != nil {
 		panic(errors.NewExternalError(err))
@@ -230,7 +334,7 @@ func (i StorageMapIterator) Next() (atree.Value, Value) {
 
 // NextKey returns the next key of the storage map iterator.
 // If there is no further key, "" is returned.
-func (i StorageMapIterator) NextKey() atree.Value {
+func (i DomainStorageMapIterator) NextKey() atree.Value {
 	k, err := i.mapIterator.NextKey()
 	if err != nil {
 		panic(errors.NewExternalError(err))
@@ -240,8 +344,8 @@ func (i StorageMapIterator) NextKey() atree.Value {
 }
 
 // NextValue returns the next value in the storage map iterator.
-// If there is nop further value, nil is returned.
-func (i StorageMapIterator) NextValue() Value {
+// If there is no further value, nil is returned.
+func (i DomainStorageMapIterator) NextValue() Value {
 	v, err := i.mapIterator.NextValue()
 	if err != nil {
 		panic(errors.NewExternalError(err))
diff --git a/interpreter/domain_storagemap_test.go b/interpreter/domain_storagemap_test.go
new file mode 100644
index 0000000000..5eb2db5c66
--- /dev/null
+++ b/interpreter/domain_storagemap_test.go
@@ -0,0 +1,814 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package interpreter_test
+
+import (
+	"math/rand"
+	"strconv"
+	"testing"
+
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/interpreter"
+	"github.com/onflow/cadence/runtime"
+	. "github.com/onflow/cadence/test_utils/interpreter_utils"
+	. "github.com/onflow/cadence/test_utils/runtime_utils"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestDomainStorageMapValueExists(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		key := interpreter.StringAtreeValue("key")
+		exist := domainStorageMap.ValueExists(interpreter.StringStorageMapKey(key))
+		require.False(t, exist)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		const count = 10
+		domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+		// Check if value exists
+		for key := range domainValues {
+			exist := domainStorageMap.ValueExists(key)
+			require.True(t, exist)
+		}
+
+		// Check if random value exists
+		for range 10 {
+			n := random.Int()
+			key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+			_, keyExist := domainValues[key]
+
+			exist := domainStorageMap.ValueExists(key)
+			require.Equal(t, keyExist, exist)
+		}
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+}
+
+func TestDomainStorageMapReadValue(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		key := interpreter.StringAtreeValue("key")
+		v := domainStorageMap.ReadValue(nil, interpreter.StringStorageMapKey(key))
+		require.Nil(t, v)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		const count = 10
+		domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+		for key, expectedValue := range domainValues {
+			value := domainStorageMap.ReadValue(nil, key)
+			require.NotNil(t, value)
+
+			checkCadenceValue(t, inter, value, expectedValue)
+		}
+
+		// Get non-existent value
+		for range 10 {
+			n := random.Int()
+			key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+			if _, keyExist := domainValues[key]; keyExist {
+				continue
+			}
+
+			value := domainStorageMap.ReadValue(nil, key)
+			require.Nil(t, value)
+		}
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+}
+
+func TestDomainStorageMapSetAndUpdateValue(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		const count = 10
+		domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random)
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		const count = 10
+		domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+		for key := range domainValues {
+			// Overwrite existing values
+			n := random.Int()
+
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+			domainStorageMap.WriteValue(inter, key, value)
+
+			domainValues[key] = value
+		}
+		require.Equal(t, uint64(count), domainStorageMap.Count())
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+}
+
+func TestDomainStorageMapRemoveValue(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		key := interpreter.StringAtreeValue("key")
+		existed := domainStorageMap.WriteValue(inter, interpreter.StringStorageMapKey(key), nil)
+		require.False(t, existed)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		const count = 10
+		domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+		for key := range domainValues {
+			existed := domainStorageMap.WriteValue(inter, key, nil)
+			require.True(t, existed)
+		}
+
+		// Remove non-existent value
+		for range 10 {
+			n := random.Int()
+			key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+			if _, keyExist := domainValues[key]; keyExist {
+				continue
+			}
+
+			existed := domainStorageMap.WriteValue(inter, key, nil)
+			require.False(t, existed)
+		}
+
+		clear(domainValues)
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+}
+
+func TestDomainStorageMapIteratorNext(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		domainValues := make(domainStorageMapValues)
+
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		iterator := domainStorageMap.Iterator(nil)
+
+		// Test calling Next() twice on empty domain storage map.
+		for range 2 {
+			k, v := iterator.Next()
+			require.Nil(t, k)
+			require.Nil(t, v)
+		}
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		const count = 10
+		domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+		iterator := domainStorageMap.Iterator(nil)
+
+		elementCount := 0
+		for {
+			k, v := iterator.Next()
+			if k == nil {
+				break
+			}
+
+			elementCount++
+
+			kv := k.(interpreter.StringAtreeValue)
+
+			expectedValue, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)]
+			require.True(t, expectedValueExist)
+
+			checkCadenceValue(t, inter, v, expectedValue)
+		}
+		require.Equal(t, uint64(elementCount), domainStorageMap.Count())
+
+		// Test calling Next() after iterator reaches the end.
+		for range 2 {
+			k, v := iterator.Next()
+			require.Nil(t, k)
+			require.Nil(t, v)
+		}
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+}
+
+func TestDomainStorageMapIteratorNextKey(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		domainValues := make(domainStorageMapValues)
+
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		iterator := domainStorageMap.Iterator(nil)
+
+		// Test calling NextKey() twice on empty domain storage map.
+		for range 2 {
+			k := iterator.NextKey()
+			require.Nil(t, k)
+		}
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		const count = 10
+		domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+		iterator := domainStorageMap.Iterator(nil)
+
+		elementCount := 0
+		for {
+			k := iterator.NextKey()
+			if k == nil {
+				break
+			}
+
+			elementCount++
+
+			kv := k.(interpreter.StringAtreeValue)
+
+			_, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)]
+			require.True(t, expectedValueExist)
+		}
+		require.Equal(t, uint64(elementCount), domainStorageMap.Count())
+
+		// Test calling NextKey() after iterator reaches the end.
+		for range 2 {
+			k := iterator.NextKey()
+			require.Nil(t, k)
+		}
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+}
+
+func TestDomainStorageMapIteratorNextValue(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		domainValues := make(domainStorageMapValues)
+
+		domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		iterator := domainStorageMap.Iterator(nil)
+
+		// Test calling NextValue() twice on empty domain storage map.
+		for range 2 {
+			v := iterator.NextValue()
+			require.Nil(t, v)
+		}
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		random := rand.New(rand.NewSource(42))
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+		// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+		// account register to match DomainStorageMap root slab.
+		const atreeValueValidationEnabled = true
+		const atreeStorageValidationEnabled = false
+		inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+			t,
+			storage,
+			atreeValueValidationEnabled,
+			atreeStorageValidationEnabled,
+		)
+
+		const count = 10
+		domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+		iterator := domainStorageMap.Iterator(nil)
+
+		elementCount := 0
+		for {
+			v := iterator.NextValue()
+			if v == nil {
+				break
+			}
+
+			elementCount++
+
+			ev, ok := v.(interpreter.EquatableValue)
+			require.True(t, ok)
+
+			match := false
+			for _, expectedValue := range domainValues {
+				if ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue) {
+					match = true
+					break
+				}
+			}
+			require.True(t, match)
+		}
+		require.Equal(t, uint64(elementCount), domainStorageMap.Count())
+
+		// Test calling NextValue() after iterator reaches the end.
+		for range 2 {
+			v := iterator.NextValue()
+			require.Nil(t, v)
+		}
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		valueID := domainStorageMap.ValueID()
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)})
+	})
+}
+
+func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	t.Run("empty", func(t *testing.T) {
+		t.Parallel()
+
+		init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) {
+			ledger := NewTestLedger(nil, nil)
+			storage := runtime.NewStorage(
+				ledger,
+				nil,
+				runtime.StorageConfig{},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+			require.NotNil(t, domainStorageMap)
+			require.Equal(t, uint64(0), domainStorageMap.Count())
+
+			err := storage.Commit(inter, false)
+			require.NoError(t, err)
+
+			valueID := domainStorageMap.ValueID()
+			return atreeValueIDToSlabID(valueID), make(domainStorageMapValues), ledger.StoredValues, ledger.StorageIndices
+		}
+
+		domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init()
+
+		ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID})
+	})
+
+	t.Run("non-empty", func(t *testing.T) {
+		t.Parallel()
+
+		init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) {
+			random := rand.New(rand.NewSource(42))
+
+			ledger := NewTestLedger(nil, nil)
+			storage := runtime.NewStorage(
+				ledger,
+				nil,
+				runtime.StorageConfig{},
+			)
+
+			// Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly.
+			// This is because DomainStorageMap isn't created through storage, so there isn't any account register to match DomainStorageMap root slab.
+			const atreeValueValidationEnabled = true
+			const atreeStorageValidationEnabled = false
+			inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled)
+
+			const count = 10
+			domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random)
+
+			err := storage.Commit(inter, false)
+			require.NoError(t, err)
+
+			valueID := domainStorageMap.ValueID()
+			return atreeValueIDToSlabID(valueID), domainValues, ledger.StoredValues, ledger.StorageIndices
+		}
+
+		domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init()
+
+		ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{},
+		)
+
+		domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		checkDomainStorageMapData(t, inter, domainStorageMap, domainValues)
+
+		CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID})
+	})
+}
+
+func createDomainStorageMap(
+	storage atree.SlabStorage,
+	inter *interpreter.Interpreter,
+	address common.Address,
+	count int,
+	random *rand.Rand,
+) (*interpreter.DomainStorageMap, domainStorageMapValues) {
+
+	// Create domain storage map
+	domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+
+	// Write to new domain storage map
+	domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random)
+
+	return domainStorageMap, domainValues
+}
+
+func atreeValueIDToSlabID(vid atree.ValueID) atree.SlabID {
+	return atree.NewSlabID(
+		atree.Address(vid[:8]),
+		atree.SlabIndex(vid[8:]),
+	)
+}
diff --git a/interpreter/interpreter.go b/interpreter/interpreter.go
index efaa58ba94..4f0874e923 100644
--- a/interpreter/interpreter.go
+++ b/interpreter/interpreter.go
@@ -237,7 +237,12 @@ func (c TypeCodes) Merge(codes TypeCodes) {
 
 type Storage interface {
 	atree.SlabStorage
-	GetStorageMap(address common.Address, domain common.StorageDomain, createIfNotExists bool) *StorageMap
+	GetDomainStorageMap(
+		inter *Interpreter,
+		address common.Address,
+		domain common.StorageDomain,
+		createIfNotExists bool,
+	) *DomainStorageMap
 	CheckHealth() error
 }
 
@@ -2681,7 +2686,7 @@ func (interpreter *Interpreter) StoredValueExists(
 	domain common.StorageDomain,
 	identifier StorageMapKey,
 ) bool {
-	accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false)
+	accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, false)
 	if accountStorage == nil {
 		return false
 	}
@@ -2693,7 +2698,7 @@ func (interpreter *Interpreter) ReadStored(
 	domain common.StorageDomain,
 	identifier StorageMapKey,
 ) Value {
-	accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false)
+	accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, false)
 	if accountStorage == nil {
 		return nil
 	}
@@ -2706,7 +2711,7 @@ func (interpreter *Interpreter) WriteStored(
 	key StorageMapKey,
 	value Value,
 ) (existed bool) {
-	accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, true)
+	accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, true)
 	return accountStorage.WriteValue(interpreter, key, value)
 }
 
@@ -4069,7 +4074,7 @@ func (interpreter *Interpreter) IsSubTypeOfSemaType(staticSubType StaticType, su
 }
 
 func (interpreter *Interpreter) domainPaths(address common.Address, domain common.PathDomain) []Value {
-	storageMap := interpreter.Storage().GetStorageMap(address, domain.StorageDomain(), false)
+	storageMap := interpreter.Storage().GetDomainStorageMap(interpreter, address, domain.StorageDomain(), false)
 	if storageMap == nil {
 		return []Value{}
 	}
@@ -4164,7 +4169,7 @@ func (interpreter *Interpreter) newStorageIterationFunction(
 			parameterTypes := fnType.ParameterTypes()
 			returnType := fnType.ReturnTypeAnnotation.Type
 
-			storageMap := config.Storage.GetStorageMap(address, domain.StorageDomain(), false)
+			storageMap := config.Storage.GetDomainStorageMap(interpreter, address, domain.StorageDomain(), false)
 			if storageMap == nil {
 				// if nothing is stored, no iteration is required
 				return Void
diff --git a/interpreter/interpreter_tracing.go b/interpreter/interpreter_tracing.go
index 365eebed42..10ee3dd418 100644
--- a/interpreter/interpreter_tracing.go
+++ b/interpreter/interpreter_tracing.go
@@ -30,9 +30,10 @@ const (
 	tracingImportPrefix   = "import."
 
 	// type prefixes
-	tracingArrayPrefix      = "array."
-	tracingDictionaryPrefix = "dictionary."
-	tracingCompositePrefix  = "composite."
+	tracingArrayPrefix            = "array."
+	tracingDictionaryPrefix       = "dictionary."
+	tracingCompositePrefix        = "composite."
+	tracingDomainStorageMapPrefix = "domainstoragemap."
 
 	// Value operation postfixes
 	tracingConstructPostfix            = "construct"
@@ -162,6 +163,20 @@ func (interpreter *Interpreter) reportDictionaryValueDeepRemoveTrace(
 	)
 }
 
+func (interpreter *Interpreter) reportDomainStorageMapDeepRemoveTrace(
+	typeInfo string,
+	count int,
+	duration time.Duration,
+) {
+	config := interpreter.SharedState.Config
+	config.OnRecordTrace(
+		interpreter,
+		tracingDomainStorageMapPrefix+tracingDeepRemovePostfix,
+		duration,
+		prepareArrayAndMapValueTraceAttrs(typeInfo, count),
+	)
+}
+
 func (interpreter *Interpreter) reportDictionaryValueDestroyTrace(
 	typeInfo string,
 	count int,
diff --git a/interpreter/misc_test.go b/interpreter/misc_test.go
index 1784bbcde6..aa952bcc41 100644
--- a/interpreter/misc_test.go
+++ b/interpreter/misc_test.go
@@ -5350,7 +5350,7 @@ func TestInterpretReferenceFailableDowncasting(t *testing.T) {
 		)
 
 		domain := storagePath.Domain.StorageDomain()
-		storageMap := storage.GetStorageMap(storageAddress, domain, true)
+		storageMap := storage.GetDomainStorageMap(inter, storageAddress, domain, true)
 		storageMapKey := interpreter.StringStorageMapKey(storagePath.Identifier)
 		storageMap.WriteValue(inter, storageMapKey, r)
 
diff --git a/interpreter/storage.go b/interpreter/storage.go
index d5e53c9a4e..1927cdfa0d 100644
--- a/interpreter/storage.go
+++ b/interpreter/storage.go
@@ -20,6 +20,7 @@ package interpreter
 
 import (
 	"bytes"
+	"cmp"
 	"io"
 	"math"
 	"strings"
@@ -106,6 +107,19 @@ type StorageDomainKey struct {
 	Address common.Address
 }
 
+func (k StorageDomainKey) Compare(o StorageDomainKey) int {
+	switch bytes.Compare(k.Address[:], o.Address[:]) {
+	case -1:
+		return -1
+	case 0:
+		return cmp.Compare(k.Domain, o.Domain)
+	case 1:
+		return 1
+	default:
+		panic(errors.NewUnreachableError())
+	}
+}
+
 func NewStorageDomainKey(
 	memoryGauge common.MemoryGauge,
 	address common.Address,
@@ -147,8 +161,8 @@ func (k StorageKey) IsLess(o StorageKey) bool {
 // InMemoryStorage
 type InMemoryStorage struct {
 	*atree.BasicSlabStorage
-	StorageMaps map[StorageDomainKey]*StorageMap
-	memoryGauge common.MemoryGauge
+	DomainStorageMaps map[StorageDomainKey]*DomainStorageMap
+	memoryGauge       common.MemoryGauge
 }
 
 var _ Storage = InMemoryStorage{}
@@ -174,26 +188,27 @@ func NewInMemoryStorage(memoryGauge common.MemoryGauge) InMemoryStorage {
 	)
 
 	return InMemoryStorage{
-		BasicSlabStorage: slabStorage,
-		StorageMaps:      make(map[StorageDomainKey]*StorageMap),
-		memoryGauge:      memoryGauge,
+		BasicSlabStorage:  slabStorage,
+		DomainStorageMaps: make(map[StorageDomainKey]*DomainStorageMap),
+		memoryGauge:       memoryGauge,
 	}
 }
 
-func (i InMemoryStorage) GetStorageMap(
+func (i InMemoryStorage) GetDomainStorageMap(
+	_ *Interpreter,
 	address common.Address,
 	domain common.StorageDomain,
 	createIfNotExists bool,
 ) (
-	storageMap *StorageMap,
+	domainStorageMap *DomainStorageMap,
 ) {
 	key := NewStorageDomainKey(i.memoryGauge, address, domain)
-	storageMap = i.StorageMaps[key]
-	if storageMap == nil && createIfNotExists {
-		storageMap = NewStorageMap(i.memoryGauge, i, atree.Address(address))
-		i.StorageMaps[key] = storageMap
+	domainStorageMap = i.DomainStorageMaps[key]
+	if domainStorageMap == nil && createIfNotExists {
+		domainStorageMap = NewDomainStorageMap(i.memoryGauge, i, atree.Address(address))
+		i.DomainStorageMaps[key] = domainStorageMap
 	}
-	return storageMap
+	return domainStorageMap
 }
 
 func (i InMemoryStorage) CheckHealth() error {
diff --git a/interpreter/storage_test.go b/interpreter/storage_test.go
index 21e0f298d3..fe822fd05b 100644
--- a/interpreter/storage_test.go
+++ b/interpreter/storage_test.go
@@ -524,7 +524,7 @@ func TestStorageOverwriteAndRemove(t *testing.T) {
 
 		const storageMapKey = StringStorageMapKey("test")
 
-		storageMap := storage.GetStorageMap(address, common.StorageDomainPathStorage, true)
+		storageMap := storage.GetDomainStorageMap(inter, address, common.StorageDomainPathStorage, true)
 		storageMap.WriteValue(inter, storageMapKey, array1)
 
 		// Overwriting delete any existing child slabs
diff --git a/interpreter/stringatreevalue_test.go b/interpreter/stringatreevalue_test.go
index 00fa5e988c..ffaedf6d44 100644
--- a/interpreter/stringatreevalue_test.go
+++ b/interpreter/stringatreevalue_test.go
@@ -36,12 +36,6 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) {
 
 	storage := NewInMemoryStorage(nil)
 
-	storageMap := storage.GetStorageMap(
-		common.MustBytesToAddress([]byte{0x1}),
-		common.PathDomainStorage.StorageDomain(),
-		true,
-	)
-
 	inter, err := NewInterpreter(
 		nil,
 		common.StringLocation("test"),
@@ -51,6 +45,13 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) {
 	)
 	require.NoError(t, err)
 
+	storageMap := storage.GetDomainStorageMap(
+		inter,
+		common.MustBytesToAddress([]byte{0x1}),
+		common.PathDomainStorage.StorageDomain(),
+		true,
+	)
+
 	// Generate a large key to force the string to get stored in a separate slab
 	keyValue := NewStringAtreeValue(nil, strings.Repeat("x", 10_000))
 
diff --git a/interpreter/value_test.go b/interpreter/value_test.go
index c0bbe137d1..3eaace67f8 100644
--- a/interpreter/value_test.go
+++ b/interpreter/value_test.go
@@ -3806,7 +3806,7 @@ func TestValue_ConformsToStaticType(t *testing.T) {
 		)
 		require.NoError(t, err)
 
-		storageMap := storage.GetStorageMap(testAddress, common.StorageDomainPathStorage, true)
+		storageMap := storage.GetDomainStorageMap(inter, testAddress, common.StorageDomainPathStorage, true)
 		storageMap.WriteValue(inter, StringStorageMapKey("test"), TrueValue)
 
 		value := valueFactory(inter)
diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go
new file mode 100644
index 0000000000..53eec2fef2
--- /dev/null
+++ b/runtime/account_storage_v1.go
@@ -0,0 +1,212 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime
+
+import (
+	"sort"
+
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/errors"
+	"github.com/onflow/cadence/interpreter"
+)
+
+type AccountStorageV1 struct {
+	ledger      atree.Ledger
+	slabStorage atree.SlabStorage
+	memoryGauge common.MemoryGauge
+
+	// newDomainStorageMapSlabIndices contains root slab indices of new domain storage maps.
+	// The indices are saved using Ledger.SetValue() during commit().
+	// Key is StorageDomainKey{common.StorageDomain, Address} and value is 8-byte slab index.
+	newDomainStorageMapSlabIndices map[interpreter.StorageDomainKey]atree.SlabIndex
+}
+
+func NewAccountStorageV1(
+	ledger atree.Ledger,
+	slabStorage atree.SlabStorage,
+	memoryGauge common.MemoryGauge,
+) *AccountStorageV1 {
+	return &AccountStorageV1{
+		ledger:      ledger,
+		slabStorage: slabStorage,
+		memoryGauge: memoryGauge,
+	}
+}
+
+func (s *AccountStorageV1) GetDomainStorageMap(
+	address common.Address,
+	domain common.StorageDomain,
+	createIfNotExists bool,
+) (
+	domainStorageMap *interpreter.DomainStorageMap,
+) {
+	var err error
+	domainStorageMap, err = getDomainStorageMapFromV1DomainRegister(
+		s.ledger,
+		s.slabStorage,
+		address,
+		domain,
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	if domainStorageMap == nil && createIfNotExists {
+		domainStorageMap = s.storeNewDomainStorageMap(address, domain)
+	}
+
+	return domainStorageMap
+}
+
+func (s *AccountStorageV1) storeNewDomainStorageMap(
+	address common.Address,
+	domain common.StorageDomain,
+) *interpreter.DomainStorageMap {
+
+	domainStorageMap := interpreter.NewDomainStorageMap(
+		s.memoryGauge,
+		s.slabStorage,
+		atree.Address(address),
+	)
+
+	slabIndex := domainStorageMap.SlabID().Index()
+
+	storageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain)
+
+	if s.newDomainStorageMapSlabIndices == nil {
+		s.newDomainStorageMapSlabIndices = map[interpreter.StorageDomainKey]atree.SlabIndex{}
+	}
+	s.newDomainStorageMapSlabIndices[storageKey] = slabIndex
+
+	return domainStorageMap
+}
+
+func (s *AccountStorageV1) commit() error {
+
+	switch len(s.newDomainStorageMapSlabIndices) {
+	case 0:
+		// Nothing to commit.
+		return nil
+
+	case 1:
+		// Optimize for the common case of a single domain storage map.
+
+		var updated int
+		for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange
+			if updated > 0 {
+				panic(errors.NewUnreachableError())
+			}
+
+			err := s.writeStorageDomainSlabIndex(
+				storageDomainKey,
+				slabIndex,
+			)
+			if err != nil {
+				return err
+			}
+
+			updated++
+		}
+
+	default:
+		// Sort the indices to ensure deterministic order
+
+		type domainStorageMapSlabIndex struct {
+			StorageDomainKey interpreter.StorageDomainKey
+			SlabIndex        atree.SlabIndex
+		}
+
+		slabIndices := make([]domainStorageMapSlabIndex, 0, len(s.newDomainStorageMapSlabIndices))
+		for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange
+			slabIndices = append(
+				slabIndices,
+				domainStorageMapSlabIndex{
+					StorageDomainKey: storageDomainKey,
+					SlabIndex:        slabIndex,
+				},
+			)
+		}
+		sort.Slice(
+			slabIndices,
+			func(i, j int) bool {
+				slabIndex1 := slabIndices[i]
+				slabIndex2 := slabIndices[j]
+				domainKey1 := slabIndex1.StorageDomainKey
+				domainKey2 := slabIndex2.StorageDomainKey
+				return domainKey1.Compare(domainKey2) < 0
+			},
+		)
+
+		for _, slabIndex := range slabIndices {
+			err := s.writeStorageDomainSlabIndex(
+				slabIndex.StorageDomainKey,
+				slabIndex.SlabIndex,
+			)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	s.newDomainStorageMapSlabIndices = nil
+
+	return nil
+}
+
+func (s *AccountStorageV1) writeStorageDomainSlabIndex(
+	storageDomainKey interpreter.StorageDomainKey,
+	slabIndex atree.SlabIndex,
+) error {
+	return writeSlabIndexToRegister(
+		s.ledger,
+		storageDomainKey.Address,
+		[]byte(storageDomainKey.Domain.Identifier()),
+		slabIndex,
+	)
+}
+
+// getDomainStorageMapFromV1DomainRegister returns domain storage map from legacy domain register.
+func getDomainStorageMapFromV1DomainRegister(
+	ledger atree.Ledger,
+	storage atree.SlabStorage,
+	address common.Address,
+	domain common.StorageDomain,
+) (*interpreter.DomainStorageMap, error) {
+
+	domainStorageSlabIndex, domainRegisterExists, err := readSlabIndexFromRegister(
+		ledger,
+		address,
+		[]byte(domain.Identifier()),
+	)
+	if err != nil {
+		return nil, err
+	}
+	if !domainRegisterExists {
+		return nil, nil
+	}
+
+	slabID := atree.NewSlabID(
+		atree.Address(address),
+		domainStorageSlabIndex,
+	)
+
+	return interpreter.NewDomainStorageMapWithRootID(storage, slabID), nil
+}
diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go
new file mode 100644
index 0000000000..71e19fdaed
--- /dev/null
+++ b/runtime/account_storage_v2.go
@@ -0,0 +1,319 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime
+
+import (
+	"sort"
+
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/errors"
+	"github.com/onflow/cadence/interpreter"
+)
+
+type AccountStorageV2 struct {
+	ledger      atree.Ledger
+	slabStorage atree.SlabStorage
+	memoryGauge common.MemoryGauge
+
+	// cachedAccountStorageMaps is a cache of account storage maps.
+	cachedAccountStorageMaps map[common.Address]*interpreter.AccountStorageMap
+
+	// newAccountStorageMapSlabIndices contains root slab indices of new account storage maps.
+	// The indices are saved using Ledger.SetValue() during commit().
+	newAccountStorageMapSlabIndices map[common.Address]atree.SlabIndex
+}
+
+func NewAccountStorageV2(
+	ledger atree.Ledger,
+	slabStorage atree.SlabStorage,
+	memoryGauge common.MemoryGauge,
+) *AccountStorageV2 {
+	return &AccountStorageV2{
+		ledger:      ledger,
+		slabStorage: slabStorage,
+		memoryGauge: memoryGauge,
+	}
+}
+
+func (s *AccountStorageV2) GetDomainStorageMap(
+	inter *interpreter.Interpreter,
+	address common.Address,
+	domain common.StorageDomain,
+	createIfNotExists bool,
+) (
+	domainStorageMap *interpreter.DomainStorageMap,
+) {
+	accountStorageMap := s.getAccountStorageMap(address)
+
+	if accountStorageMap == nil && createIfNotExists {
+		accountStorageMap = s.storeNewAccountStorageMap(address)
+	}
+
+	if accountStorageMap != nil {
+		domainStorageMap = accountStorageMap.GetDomain(
+			s.memoryGauge,
+			inter,
+			domain,
+			createIfNotExists,
+		)
+	}
+
+	return
+}
+
+// getAccountStorageMap returns AccountStorageMap if exists, or nil otherwise.
+func (s *AccountStorageV2) getAccountStorageMap(
+	address common.Address,
+) (
+	accountStorageMap *interpreter.AccountStorageMap,
+) {
+	// Return cached account storage map if it exists.
+
+	if s.cachedAccountStorageMaps != nil {
+		accountStorageMap = s.cachedAccountStorageMaps[address]
+		if accountStorageMap != nil {
+			return accountStorageMap
+		}
+	}
+
+	defer func() {
+		if accountStorageMap != nil {
+			s.cacheAccountStorageMap(
+				address,
+				accountStorageMap,
+			)
+		}
+	}()
+
+	// Load account storage map if account storage register exists.
+
+	var err error
+	accountStorageMap, err = getAccountStorageMapFromRegister(
+		s.ledger,
+		s.slabStorage,
+		address,
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	return
+}
+
+func (s *AccountStorageV2) cacheAccountStorageMap(
+	address common.Address,
+	accountStorageMap *interpreter.AccountStorageMap,
+) {
+	if s.cachedAccountStorageMaps == nil {
+		s.cachedAccountStorageMaps = map[common.Address]*interpreter.AccountStorageMap{}
+	}
+	s.cachedAccountStorageMaps[address] = accountStorageMap
+}
+
+func (s *AccountStorageV2) storeNewAccountStorageMap(
+	address common.Address,
+) *interpreter.AccountStorageMap {
+
+	accountStorageMap := interpreter.NewAccountStorageMap(
+		s.memoryGauge,
+		s.slabStorage,
+		atree.Address(address),
+	)
+
+	slabIndex := accountStorageMap.SlabID().Index()
+
+	s.SetNewAccountStorageMapSlabIndex(
+		address,
+		slabIndex,
+	)
+
+	s.cacheAccountStorageMap(
+		address,
+		accountStorageMap,
+	)
+
+	return accountStorageMap
+}
+
+func (s *AccountStorageV2) SetNewAccountStorageMapSlabIndex(
+	address common.Address,
+	slabIndex atree.SlabIndex,
+) {
+	if s.newAccountStorageMapSlabIndices == nil {
+		s.newAccountStorageMapSlabIndices = map[common.Address]atree.SlabIndex{}
+	}
+	s.newAccountStorageMapSlabIndices[address] = slabIndex
+}
+
+func (s *AccountStorageV2) commit() error {
+	switch len(s.newAccountStorageMapSlabIndices) {
+	case 0:
+		// Nothing to commit.
+		return nil
+
+	case 1:
+		// Optimize for the common case of a single account storage map.
+
+		var updated int
+		for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange
+			if updated > 0 {
+				panic(errors.NewUnreachableError())
+			}
+
+			err := s.writeAccountStorageSlabIndex(
+				address,
+				slabIndex,
+			)
+			if err != nil {
+				return err
+			}
+
+			updated++
+		}
+
+	default:
+		// Sort the indices to ensure deterministic order
+
+		type accountStorageMapSlabIndex struct {
+			Address   common.Address
+			SlabIndex atree.SlabIndex
+		}
+
+		slabIndices := make([]accountStorageMapSlabIndex, 0, len(s.newAccountStorageMapSlabIndices))
+		for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange
+			slabIndices = append(
+				slabIndices,
+				accountStorageMapSlabIndex{
+					Address:   address,
+					SlabIndex: slabIndex,
+				},
+			)
+		}
+		sort.Slice(
+			slabIndices,
+			func(i, j int) bool {
+				slabIndex1 := slabIndices[i]
+				slabIndex2 := slabIndices[j]
+				address1 := slabIndex1.Address
+				address2 := slabIndex2.Address
+				return address1.Compare(address2) < 0
+			},
+		)
+
+		for _, slabIndex := range slabIndices {
+			err := s.writeAccountStorageSlabIndex(
+				slabIndex.Address,
+				slabIndex.SlabIndex,
+			)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	s.newAccountStorageMapSlabIndices = nil
+
+	return nil
+}
+
+func (s *AccountStorageV2) writeAccountStorageSlabIndex(
+	address common.Address,
+	slabIndex atree.SlabIndex,
+) error {
+	return writeSlabIndexToRegister(
+		s.ledger,
+		address,
+		[]byte(AccountStorageKey),
+		slabIndex,
+	)
+}
+
+func readAccountStorageSlabIndexFromRegister(
+	ledger atree.Ledger,
+	address common.Address,
+) (
+	atree.SlabIndex,
+	bool,
+	error,
+) {
+	return readSlabIndexFromRegister(
+		ledger,
+		address,
+		[]byte(AccountStorageKey),
+	)
+}
+
+func getAccountStorageMapFromRegister(
+	ledger atree.Ledger,
+	slabStorage atree.SlabStorage,
+	address common.Address,
+) (
+	*interpreter.AccountStorageMap,
+	error,
+) {
+	slabIndex, registerExists, err := readAccountStorageSlabIndexFromRegister(
+		ledger,
+		address,
+	)
+	if err != nil {
+		return nil, err
+	}
+	if !registerExists {
+		return nil, nil
+	}
+
+	slabID := atree.NewSlabID(
+		atree.Address(address),
+		slabIndex,
+	)
+
+	return interpreter.NewAccountStorageMapWithRootID(slabStorage, slabID), nil
+}
+
+func hasAccountStorageMap(
+	ledger atree.Ledger,
+	address common.Address,
+) (bool, error) {
+
+	_, registerExists, err := readAccountStorageSlabIndexFromRegister(
+		ledger,
+		address,
+	)
+	if err != nil {
+		return false, err
+	}
+	return registerExists, nil
+}
+
+func (s *AccountStorageV2) cachedRootSlabIDs() []atree.SlabID {
+
+	var slabIDs []atree.SlabID
+
+	// Get cached account storage map slab IDs.
+	for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange
+		slabIDs = append(
+			slabIDs,
+			storageMap.SlabID(),
+		)
+	}
+
+	return slabIDs
+}
diff --git a/runtime/capabilitycontrollers_test.go b/runtime/capabilitycontrollers_test.go
index 2e396121df..78ff497b96 100644
--- a/runtime/capabilitycontrollers_test.go
+++ b/runtime/capabilitycontrollers_test.go
@@ -3251,7 +3251,10 @@ func TestRuntimeCapabilityControllers(t *testing.T) {
 				)
 				require.NoError(t, err)
 
-				storageMap := storage.GetStorageMap(
+				// Use *interpreter.Interpreter(nil) here because createIfNotExists is false.
+
+				storageMap := storage.GetDomainStorageMap(
+					nil,
 					common.MustBytesToAddress([]byte{0x1}),
 					common.StorageDomainPathCapability,
 					false,
@@ -3840,7 +3843,8 @@ func TestRuntimeCapabilitiesGetBackwardCompatibility(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		publicStorageMap := storage.GetStorageMap(
+		publicStorageMap := storage.GetDomainStorageMap(
+			inter,
 			testAddress,
 			common.PathDomainPublic.StorageDomain(),
 			true,
@@ -3947,7 +3951,8 @@ func TestRuntimeCapabilitiesPublishBackwardCompatibility(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		publicStorageMap := storage.GetStorageMap(
+		publicStorageMap := storage.GetDomainStorageMap(
+			inter,
 			testAddress,
 			common.PathDomainStorage.StorageDomain(),
 			true,
@@ -4037,7 +4042,8 @@ func TestRuntimeCapabilitiesUnpublishBackwardCompatibility(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		publicStorageMap := storage.GetStorageMap(
+		publicStorageMap := storage.GetDomainStorageMap(
+			inter,
 			testAddress,
 			common.PathDomainPublic.StorageDomain(),
 			true,
diff --git a/runtime/config.go b/runtime/config.go
index d6882cb353..68926367d0 100644
--- a/runtime/config.go
+++ b/runtime/config.go
@@ -37,4 +37,6 @@ type Config struct {
 	CoverageReport *CoverageReport
 	// LegacyContractUpgradeEnabled enabled specifies whether to use the old parser when parsing an old contract
 	LegacyContractUpgradeEnabled bool
+	// StorageFormatV2Enabled specifies whether storage format V2 is enabled
+	StorageFormatV2Enabled bool
 }
diff --git a/runtime/contract_function_executor.go b/runtime/contract_function_executor.go
index 8ba0f49bf8..19960185f2 100644
--- a/runtime/contract_function_executor.go
+++ b/runtime/contract_function_executor.go
@@ -105,7 +105,13 @@ func (executor *interpreterContractFunctionExecutor) preprocess() (err error) {
 
 	runtimeInterface := context.Interface
 
-	storage := NewStorage(runtimeInterface, runtimeInterface)
+	storage := NewStorage(
+		runtimeInterface,
+		runtimeInterface,
+		StorageConfig{
+			StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled,
+		},
+	)
 	executor.storage = storage
 
 	environment := context.Environment
diff --git a/runtime/contract_test.go b/runtime/contract_test.go
index 32dc0b12cd..d62436af55 100644
--- a/runtime/contract_test.go
+++ b/runtime/contract_test.go
@@ -44,18 +44,21 @@ func TestRuntimeContract(t *testing.T) {
 	t.Parallel()
 
 	type testCase struct {
-		name        string // the name of the contract used in add/update calls
-		code        string // the code we use to add the contract
-		code2       string // the code we use to update the contract
-		valid       bool
-		isInterface bool
+		name                   string // the name of the contract used in add/update calls
+		code                   string // the code we use to add the contract
+		code2                  string // the code we use to update the contract
+		valid                  bool
+		isInterface            bool
+		storageFormatV2Enabled bool
 	}
 
-	test := func(t *testing.T, tc testCase) {
-
+	runTest := func(t *testing.T, tc testCase) {
 		t.Parallel()
 
-		runtime := NewTestInterpreterRuntime()
+		config := DefaultTestInterpreterConfig
+		config.StorageFormatV2Enabled = tc.storageFormatV2Enabled
+
+		runtime := NewTestInterpreterRuntimeWithConfig(config)
 
 		var loggedMessages []string
 
@@ -222,8 +225,18 @@ func TestRuntimeContract(t *testing.T) {
 		// so getting the storage map here once upfront would result in outdated data
 
 		getContractValueExists := func() bool {
-			storageMap := NewStorage(storage, nil).
-				GetStorageMap(signerAddress, common.StorageDomainContract, false)
+			storageMap := NewStorage(
+				storage,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: tc.storageFormatV2Enabled,
+				},
+			).GetDomainStorageMap(
+				inter,
+				signerAddress,
+				common.StorageDomainContract,
+				false,
+			)
 			if storageMap == nil {
 				return false
 			}
@@ -514,6 +527,18 @@ func TestRuntimeContract(t *testing.T) {
 
 	}
 
+	test := func(t *testing.T, tc testCase) {
+		t.Run("storage format V2 disabled", func(t *testing.T) {
+			tc.storageFormatV2Enabled = false
+			runTest(t, tc)
+		})
+
+		t.Run("storage format V2 enabled", func(t *testing.T) {
+			tc.storageFormatV2Enabled = true
+			runTest(t, tc)
+		})
+	}
+
 	t.Run("valid contract, correct name", func(t *testing.T) {
 		test(t, testCase{
 			name:        "Test",
diff --git a/runtime/environment.go b/runtime/environment.go
index 4c4d13b69a..6caa05fc8b 100644
--- a/runtime/environment.go
+++ b/runtime/environment.go
@@ -1106,7 +1106,8 @@ func (e *interpreterEnvironment) loadContract(
 
 	location := compositeType.Location
 	if addressLocation, ok := location.(common.AddressLocation); ok {
-		storageMap := e.storage.GetStorageMap(
+		storageMap := e.storage.GetDomainStorageMap(
+			inter,
 			addressLocation.Address,
 			common.StorageDomainContract,
 			false,
diff --git a/runtime/ft_test.go b/runtime/ft_test.go
index 739920488f..144f062e1c 100644
--- a/runtime/ft_test.go
+++ b/runtime/ft_test.go
@@ -1083,7 +1083,8 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) {
 		contractsAddress,
 	)
 
-	contractStorage := storage.GetStorageMap(
+	contractStorage := storage.GetDomainStorageMap(
+		inter,
 		contractsAddress,
 		common.StorageDomainContract,
 		true,
@@ -1118,7 +1119,8 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) {
 		userAddress,
 	)
 
-	userStorage := storage.GetStorageMap(
+	userStorage := storage.GetDomainStorageMap(
+		inter,
 		userAddress,
 		common.PathDomainStorage.StorageDomain(),
 		true,
diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go
new file mode 100644
index 0000000000..08d6afa55d
--- /dev/null
+++ b/runtime/migrate_domain_registers.go
@@ -0,0 +1,171 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime
+
+import (
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/errors"
+	"github.com/onflow/cadence/interpreter"
+)
+
+type GetDomainStorageMapFunc func(
+	ledger atree.Ledger,
+	storage atree.SlabStorage,
+	address common.Address,
+	domain common.StorageDomain,
+) (
+	*interpreter.DomainStorageMap,
+	error,
+)
+
+// DomainRegisterMigration migrates domain registers to account storage maps.
+type DomainRegisterMigration struct {
+	ledger              atree.Ledger
+	storage             atree.SlabStorage
+	inter               *interpreter.Interpreter
+	memoryGauge         common.MemoryGauge
+	getDomainStorageMap GetDomainStorageMapFunc
+}
+
+func NewDomainRegisterMigration(
+	ledger atree.Ledger,
+	storage atree.SlabStorage,
+	inter *interpreter.Interpreter,
+	memoryGauge common.MemoryGauge,
+	getDomainStorageMap GetDomainStorageMapFunc,
+) *DomainRegisterMigration {
+	if getDomainStorageMap == nil {
+		getDomainStorageMap = getDomainStorageMapFromV1DomainRegister
+	}
+	return &DomainRegisterMigration{
+		ledger:              ledger,
+		storage:             storage,
+		inter:               inter,
+		memoryGauge:         memoryGauge,
+		getDomainStorageMap: getDomainStorageMap,
+	}
+}
+
+func (m *DomainRegisterMigration) MigrateAccount(
+	address common.Address,
+) (
+	*interpreter.AccountStorageMap,
+	error,
+) {
+	exists, err := hasAccountStorageMap(m.ledger, address)
+	if err != nil {
+		return nil, err
+	}
+	if exists {
+		// Account storage map already exists
+		return nil, nil
+	}
+
+	// Migrate existing domains
+	accountStorageMap, err := m.migrateDomainRegisters(address)
+	if err != nil {
+		return nil, err
+	}
+
+	if accountStorageMap == nil {
+		// Nothing migrated
+		return nil, nil
+	}
+
+	slabIndex := accountStorageMap.SlabID().Index()
+
+	// Write account register
+	errors.WrapPanic(func() {
+		err = m.ledger.SetValue(
+			address[:],
+			[]byte(AccountStorageKey),
+			slabIndex[:],
+		)
+	})
+	if err != nil {
+		return nil, interpreter.WrappedExternalError(err)
+	}
+
+	return accountStorageMap, nil
+}
+
+// migrateDomainRegisters migrates all existing domain storage maps to a new account storage map,
+// and removes the domain registers.
+func (m *DomainRegisterMigration) migrateDomainRegisters(
+	address common.Address,
+) (
+	*interpreter.AccountStorageMap,
+	error,
+) {
+
+	var accountStorageMap *interpreter.AccountStorageMap
+
+	for _, domain := range common.AllStorageDomains {
+
+		domainStorageMap, err := m.getDomainStorageMap(
+			m.ledger,
+			m.storage,
+			address,
+			domain,
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		if domainStorageMap == nil {
+			// Skip non-existent domain
+			continue
+		}
+
+		if accountStorageMap == nil {
+			accountStorageMap = interpreter.NewAccountStorageMap(
+				m.memoryGauge,
+				m.storage,
+				atree.Address(address),
+			)
+		}
+
+		// Migrate (insert) existing domain storage map to account storage map
+		existed := accountStorageMap.WriteDomain(m.inter, domain, domainStorageMap)
+		if existed {
+			// This shouldn't happen because we are inserting domain storage map into empty account storage map.
+			return nil, errors.NewUnexpectedError(
+				"failed to migrate domain %s for account %x: domain already exists in account storage map",
+				domain.Identifier(),
+				address,
+			)
+		}
+
+		// Remove migrated domain registers
+		errors.WrapPanic(func() {
+			// NOTE: removing non-existent domain registers is no-op.
+			err = m.ledger.SetValue(
+				address[:],
+				[]byte(domain.Identifier()),
+				nil)
+		})
+		if err != nil {
+			return nil, interpreter.WrappedExternalError(err)
+		}
+	}
+
+	return accountStorageMap, nil
+}
diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go
new file mode 100644
index 0000000000..f1d6e4304c
--- /dev/null
+++ b/runtime/migrate_domain_registers_test.go
@@ -0,0 +1,532 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime_test
+
+import (
+	"math"
+	"math/rand"
+	goruntime "runtime"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/errors"
+	"github.com/onflow/cadence/interpreter"
+	"github.com/onflow/cadence/runtime"
+	. "github.com/onflow/cadence/test_utils/interpreter_utils"
+	. "github.com/onflow/cadence/test_utils/runtime_utils"
+)
+
+func TestMigrateDomainRegisters(t *testing.T) {
+	t.Parallel()
+
+	isAtreeRegister := func(key string) bool {
+		return key[0] == '$' && len(key) == 9
+	}
+
+	getNonAtreeRegisters := func(values map[string][]byte) map[string][]byte {
+		nonAtreeRegisters := make(map[string][]byte)
+		for k, v := range values {
+			ks := strings.Split(k, "|")
+			if !isAtreeRegister(ks[1]) && len(v) > 0 {
+				nonAtreeRegisters[k] = v
+			}
+		}
+		return nonAtreeRegisters
+	}
+
+	address1 := common.MustBytesToAddress([]byte{0x1})
+	address2 := common.MustBytesToAddress([]byte{0x2})
+
+	addresses := []common.Address{address2, address1}
+
+	t.Run("accounts without domain registers", func(t *testing.T) {
+		t.Parallel()
+
+		ledger := NewTestLedger(nil, nil)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		migrator := runtime.NewDomainRegisterMigration(
+			ledger,
+			storage,
+			inter,
+			nil,
+			nil,
+		)
+
+		for _, address := range addresses {
+			accountStorageMap, err := migrator.MigrateAccount(address)
+			require.Nil(t, accountStorageMap)
+			require.NoError(t, err)
+		}
+
+		err := storage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		require.Equal(t, 0, len(ledger.StoredValues))
+	})
+
+	t.Run("accounts with domain registers", func(t *testing.T) {
+		t.Parallel()
+
+		accountsInfo := []accountInfo{
+			{
+				address: address1,
+				domains: []domainInfo{
+					{domain: common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3},
+					{domain: common.PathDomainPrivate.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3},
+				},
+			},
+			{
+				address: address2,
+				domains: []domainInfo{
+					{domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3},
+				},
+			},
+		}
+
+		ledger, accountsValues := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		migrator := runtime.NewDomainRegisterMigration(
+			ledger,
+			storage,
+			inter,
+			nil,
+			nil,
+		)
+
+		var accountStorageMaps []*interpreter.AccountStorageMap
+		for _, address := range addresses {
+			accountStorageMap, err := migrator.MigrateAccount(address)
+			require.NotNil(t, accountStorageMap)
+			require.NoError(t, err)
+			accountStorageMaps = append(accountStorageMaps, accountStorageMap)
+		}
+
+		err := storage.FastCommit(goruntime.NumCPU())
+		require.NoError(t, err)
+
+		// Check non-atree registers
+		nonAtreeRegisters := getNonAtreeRegisters(ledger.StoredValues)
+		require.Equal(t, len(addresses), len(nonAtreeRegisters))
+		require.Contains(t, nonAtreeRegisters, string(address1[:])+"|"+runtime.AccountStorageKey)
+		require.Contains(t, nonAtreeRegisters, string(address2[:])+"|"+runtime.AccountStorageKey)
+
+		// Check atree storage
+		expectedRootSlabIDs := make([]atree.SlabID, 0, len(accountStorageMaps))
+		for _, accountStorageMap := range accountStorageMaps {
+			expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID())
+		}
+
+		CheckAtreeStorageHealth(t, storage, expectedRootSlabIDs)
+
+		// Check account storage map data
+		for address, accountValues := range accountsValues {
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+	})
+
+	t.Run("migrated accounts", func(t *testing.T) {
+		t.Parallel()
+
+		accountsInfo := []accountInfo{
+			{
+				address: address1,
+				domains: []domainInfo{
+					{domain: common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3},
+					{domain: common.PathDomainPrivate.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3},
+				},
+			},
+			{
+				address: address2,
+				domains: []domainInfo{
+					{domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3},
+				},
+			},
+		}
+
+		ledger, accountsValues := newTestLedgerWithMigratedAccounts(t, nil, nil, accountsInfo)
+		storage := runtime.NewStorage(
+			ledger,
+			nil,
+			runtime.StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		migrator := runtime.NewDomainRegisterMigration(
+			ledger,
+			storage,
+			inter,
+			nil,
+			nil,
+		)
+
+		for _, address := range addresses {
+			accountStorageMap, err := migrator.MigrateAccount(address)
+			require.Nil(t, accountStorageMap)
+			require.NoError(t, err)
+		}
+
+		// Check account storage map data
+		for address, accountValues := range accountsValues {
+			checkAccountStorageMapData(
+				t,
+				ledger.StoredValues,
+				ledger.StorageIndices,
+				address,
+				accountValues,
+			)
+		}
+	})
+}
+
+type domainInfo struct {
+	domain                common.StorageDomain
+	domainStorageMapCount int
+	maxDepth              int
+}
+
+type accountInfo struct {
+	address common.Address
+	domains []domainInfo
+}
+
+func newTestLedgerWithUnmigratedAccounts(
+	tb testing.TB,
+	onRead LedgerOnRead,
+	onWrite LedgerOnWrite,
+	accounts []accountInfo,
+) (TestLedger, map[common.Address]accountStorageMapValues) {
+	ledger := NewTestLedger(nil, nil)
+	storage := runtime.NewStorage(
+		ledger,
+		nil,
+		runtime.StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
+
+	// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+	// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+	// domain register to match DomainStorageMap root slab.
+	const atreeValueValidationEnabled = true
+	const atreeStorageValidationEnabled = false
+	inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+		tb,
+		storage,
+		atreeValueValidationEnabled,
+		atreeStorageValidationEnabled,
+	)
+
+	random := rand.New(rand.NewSource(42))
+
+	accountsValues := make(map[common.Address]accountStorageMapValues)
+
+	var expectedDomainRootSlabIDs []atree.SlabID
+
+	for _, account := range accounts {
+
+		address := account.address
+
+		accountValues := make(accountStorageMapValues)
+
+		accountsValues[address] = accountValues
+
+		for _, domainInfo := range account.domains {
+
+			domain := domainInfo.domain
+			domainStorageMapCount := domainInfo.domainStorageMapCount
+			maxDepth := domainInfo.maxDepth
+
+			accountValues[domain] = make(domainStorageMapValues)
+
+			// Create domain storage map
+			domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+
+			// Write domain register
+			domainStorageMapValueID := domainStorageMap.ValueID()
+			err := ledger.SetValue(address[:], []byte(domain.Identifier()), domainStorageMapValueID[8:])
+			require.NoError(tb, err)
+
+			vid := domainStorageMap.ValueID()
+			expectedDomainRootSlabIDs = append(
+				expectedDomainRootSlabIDs,
+				atree.NewSlabID(atree.Address(address), atree.SlabIndex(vid[8:])))
+
+			// Write elements to domain storage map
+			for len(accountValues[domain]) < domainStorageMapCount {
+
+				key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int()))
+
+				depth := random.Intn(maxDepth + 1)
+				value := randomCadenceValues(inter, address, depth, random)
+
+				_ = domainStorageMap.WriteValue(inter, key, value)
+
+				accountValues[domain][key] = value
+			}
+		}
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(tb, err)
+
+	CheckAtreeStorageHealth(tb, storage, expectedDomainRootSlabIDs)
+
+	// Create a new storage
+	newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices)
+
+	return newLedger, accountsValues
+}
+
+func newTestLedgerWithMigratedAccounts(
+	tb testing.TB,
+	onRead LedgerOnRead,
+	onWrite LedgerOnWrite,
+	accounts []accountInfo,
+) (TestLedger, map[common.Address]accountStorageMapValues) {
+	ledger := NewTestLedger(nil, nil)
+	storage := runtime.NewStorage(
+		ledger,
+		nil,
+		runtime.StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
+
+	// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+	// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+	// domain register to match DomainStorageMap root slab.
+	const atreeValueValidationEnabled = true
+	const atreeStorageValidationEnabled = false
+	inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+		tb,
+		storage,
+		atreeValueValidationEnabled,
+		atreeStorageValidationEnabled,
+	)
+
+	random := rand.New(rand.NewSource(42))
+
+	expectedRootSlabIDs := make([]atree.SlabID, 0, len(accounts))
+
+	accountsValues := make(map[common.Address]accountStorageMapValues)
+
+	for _, account := range accounts {
+
+		address := account.address
+
+		accountValues := make(accountStorageMapValues)
+
+		accountsValues[address] = accountValues
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+
+		// Write account register
+		accountStorageMapSlabIndex := accountStorageMap.SlabID().Index()
+		err := ledger.SetValue(address[:], []byte(runtime.AccountStorageKey), accountStorageMapSlabIndex[:])
+		require.NoError(tb, err)
+
+		expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID())
+
+		for _, domainInfo := range account.domains {
+
+			domain := domainInfo.domain
+			domainStorageMapCount := domainInfo.domainStorageMapCount
+			maxDepth := domainInfo.maxDepth
+
+			accountValues[domain] = make(domainStorageMapValues)
+
+			// Create domain storage map
+			domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain)
+
+			// Write elements to domain storage map
+			for len(accountValues[domain]) < domainStorageMapCount {
+
+				key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int()))
+
+				depth := random.Intn(maxDepth + 1)
+				value := randomCadenceValues(inter, address, depth, random)
+
+				_ = domainStorageMap.WriteValue(inter, key, value)
+
+				accountValues[domain][key] = value
+			}
+		}
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(tb, err)
+
+	CheckAtreeStorageHealth(tb, storage, expectedRootSlabIDs)
+
+	newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices)
+
+	return newLedger, accountsValues
+}
+
+func randomCadenceValues(
+	inter *interpreter.Interpreter,
+	address common.Address,
+	depth int,
+	random *rand.Rand,
+) interpreter.EquatableValue {
+	var typeIndex int
+	if depth == 0 {
+		typeIndex = random.Intn(typeLargeString + 1)
+	} else {
+		typeIndex = random.Intn(maxType)
+	}
+
+	switch typeIndex {
+	case typeUint8:
+		num := random.Intn(math.MaxUint8 + 1)
+		return interpreter.NewUnmeteredUInt8Value(uint8(num))
+
+	case typeUint16:
+		num := random.Intn(math.MaxUint16 + 1)
+		return interpreter.NewUnmeteredUInt16Value(uint16(num))
+
+	case typeUint32:
+		num := random.Uint32()
+		return interpreter.NewUnmeteredUInt32Value(num)
+
+	case typeUint64:
+		num := random.Uint64()
+		return interpreter.NewUnmeteredUInt64Value(num)
+
+	case typeSmallString:
+		const maxSmallStringLength = 32
+
+		size := random.Intn(maxSmallStringLength + 1)
+
+		b := make([]byte, size)
+		random.Read(b)
+		s := strings.ToValidUTF8(string(b), "$")
+		return interpreter.NewUnmeteredStringValue(s)
+
+	case typeLargeString:
+		const minLargeStringLength = 256
+		const maxLargeStringLength = 1024
+
+		size := random.Intn(maxLargeStringLength+1-minLargeStringLength) + minLargeStringLength
+
+		b := make([]byte, size)
+		random.Read(b)
+		s := strings.ToValidUTF8(string(b), "$")
+		return interpreter.NewUnmeteredStringValue(s)
+
+	case typeArray:
+		const minArrayLength = 1
+		const maxArrayLength = 20
+
+		size := random.Intn(maxArrayLength+1-minArrayLength) + minArrayLength
+
+		arrayType := interpreter.NewVariableSizedStaticType(
+			nil,
+			interpreter.PrimitiveStaticTypeAny,
+		)
+
+		depth--
+
+		values := make([]interpreter.Value, size)
+		for i := range size {
+			values[i] = randomCadenceValues(inter, common.ZeroAddress, depth, random)
+		}
+
+		return interpreter.NewArrayValue(
+			inter,
+			interpreter.EmptyLocationRange,
+			arrayType,
+			address,
+			values...,
+		)
+
+	case typeDictionary:
+		const minDictLength = 1
+		const maxDictLength = 20
+
+		size := random.Intn(maxDictLength+1-minDictLength) + minDictLength
+
+		dictType := interpreter.NewDictionaryStaticType(
+			nil,
+			interpreter.PrimitiveStaticTypeAny,
+			interpreter.PrimitiveStaticTypeAny,
+		)
+
+		depth--
+
+		keyAndValues := make([]interpreter.Value, 0, size*2)
+		for i := range size * 2 {
+			if i%2 == 0 {
+				// Key (0 depth for element)
+				keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, 0, random))
+			} else {
+				// Value (decremented depth for element)
+				keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, depth, random))
+			}
+		}
+
+		return interpreter.NewDictionaryValueWithAddress(inter, interpreter.EmptyLocationRange, dictType, address, keyAndValues...)
+
+	default:
+		panic(errors.NewUnreachableError())
+	}
+}
+
+const (
+	typeUint8 = iota
+	typeUint16
+	typeUint32
+	typeUint64
+	typeSmallString
+	typeLargeString
+	typeArray
+	typeDictionary
+	maxType
+)
diff --git a/runtime/runtime.go b/runtime/runtime.go
index c6277c55ae..d516127ac8 100644
--- a/runtime/runtime.go
+++ b/runtime/runtime.go
@@ -558,7 +558,15 @@ func (r *interpreterRuntime) Storage(context Context) (*Storage, *interpreter.In
 
 	codesAndPrograms := NewCodesAndPrograms()
 
-	storage := NewStorage(context.Interface, context.Interface)
+	runtimeInterface := context.Interface
+
+	storage := NewStorage(
+		runtimeInterface,
+		runtimeInterface,
+		StorageConfig{
+			StorageFormatV2Enabled: r.defaultConfig.StorageFormatV2Enabled,
+		},
+	)
 
 	environment := context.Environment
 	if environment == nil {
@@ -566,7 +574,7 @@ func (r *interpreterRuntime) Storage(context Context) (*Storage, *interpreter.In
 	}
 
 	environment.Configure(
-		context.Interface,
+		runtimeInterface,
 		codesAndPrograms,
 		storage,
 		context.CoverageReport,
diff --git a/runtime/runtime_memory_metering_test.go b/runtime/runtime_memory_metering_test.go
index 64512a1091..6e6687e4c8 100644
--- a/runtime/runtime_memory_metering_test.go
+++ b/runtime/runtime_memory_metering_test.go
@@ -815,7 +815,9 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) {
 				// Before the storageUsed function is invoked, the deltas must have been committed.
 				// So the encoded slabs must have been metered at this point.
 				assert.Equal(t, uint64(0), meter.getMemory(common.MemoryKindAtreeEncodedSlab))
+
 				storageUsedInvoked = true
+
 				return 1, nil
 			},
 		}
@@ -840,85 +842,152 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) {
 	t.Run("account.storage.save", func(t *testing.T) {
 		t.Parallel()
 
-		code := []byte(`
-            transaction {
-                prepare(signer: auth(Storage) &Account) {
-                    signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test)
-                }
-            }
-        `)
+		test := func(storageFormatV2Enabled bool) {
 
-		meter := newTestMemoryGauge()
+			name := fmt.Sprintf(
+				"storage format V2 enabled: %v",
+				storageFormatV2Enabled,
+			)
 
-		runtimeInterface := &TestRuntimeInterface{
-			Storage: NewTestLedger(nil, nil),
-			OnGetSigningAccounts: func() ([]Address, error) {
-				return []Address{{42}}, nil
-			},
-			OnMeterMemory: meter.MeterMemory,
-		}
+			t.Run(name, func(t *testing.T) {
+				t.Parallel()
 
-		runtime := NewTestInterpreterRuntime()
+				code := []byte(`
+                    transaction {
+                        prepare(signer: auth(Storage) &Account) {
+                            signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test)
+                        }
+                    }
+                `)
 
-		err := runtime.ExecuteTransaction(
-			Script{
-				Source: code,
-			},
-			Context{
-				Interface: runtimeInterface,
-				Location:  common.TransactionLocation{},
-			},
-		)
+				meter := newTestMemoryGauge()
 
-		require.NoError(t, err)
-		assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab))
+				runtimeInterface := &TestRuntimeInterface{
+					Storage: NewTestLedger(nil, nil),
+					OnGetSigningAccounts: func() ([]Address, error) {
+						return []Address{{42}}, nil
+					},
+					OnMeterMemory: meter.MeterMemory,
+				}
+
+				config := DefaultTestInterpreterConfig
+				config.StorageFormatV2Enabled = storageFormatV2Enabled
+				runtime := NewTestInterpreterRuntimeWithConfig(config)
+
+				err := runtime.ExecuteTransaction(
+					Script{
+						Source: code,
+					},
+					Context{
+						Interface: runtimeInterface,
+						Location:  common.TransactionLocation{},
+					},
+				)
+
+				require.NoError(t, err)
+
+				var expected uint64
+				if storageFormatV2Enabled {
+					expected = 5
+				} else {
+					expected = 4
+				}
+				assert.Equal(t,
+					expected,
+					meter.getMemory(common.MemoryKindAtreeEncodedSlab),
+				)
+			})
+		}
+
+		for _, storageFormatV2Enabled := range []bool{false, true} {
+			test(storageFormatV2Enabled)
+		}
 	})
 
 	t.Run("storage used non empty", func(t *testing.T) {
 		t.Parallel()
 
-		code := []byte(`
-            transaction {
-                prepare(signer: auth(Storage) &Account) {
-                    signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test)
-                    signer.storage.used
-                }
-            }
-        `)
+		test := func(storageFormatV2Enabled bool) {
 
-		meter := newTestMemoryGauge()
-		storageUsedInvoked := false
+			name := fmt.Sprintf(
+				"storage format V2 enabled: %v",
+				storageFormatV2Enabled,
+			)
 
-		runtimeInterface := &TestRuntimeInterface{
-			Storage: NewTestLedger(nil, nil),
-			OnGetSigningAccounts: func() ([]Address, error) {
-				return []Address{{42}}, nil
-			},
-			OnMeterMemory: meter.MeterMemory,
-			OnGetStorageUsed: func(_ Address) (uint64, error) {
-				// Before the storageUsed function is invoked, the deltas must have been committed.
-				// So the encoded slabs must have been metered at this point.
-				assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab))
-				storageUsedInvoked = true
-				return 1, nil
-			},
-		}
+			t.Run(name, func(t *testing.T) {
+				t.Parallel()
 
-		runtime := NewTestInterpreterRuntime()
+				code := []byte(`
+                    transaction {
+                        prepare(signer: auth(Storage) &Account) {
+                            signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test)
+                            signer.storage.used
+                        }
+                    }
+                `)
 
-		err := runtime.ExecuteTransaction(
-			Script{
-				Source: code,
-			},
-			Context{
-				Interface: runtimeInterface,
-				Location:  common.TransactionLocation{},
-			},
-		)
+				meter := newTestMemoryGauge()
+				storageUsedInvoked := false
 
-		require.NoError(t, err)
-		assert.True(t, storageUsedInvoked)
-		assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab))
+				runtimeInterface := &TestRuntimeInterface{
+					Storage: NewTestLedger(nil, nil),
+					OnGetSigningAccounts: func() ([]Address, error) {
+						return []Address{{42}}, nil
+					},
+					OnMeterMemory: meter.MeterMemory,
+					OnGetStorageUsed: func(_ Address) (uint64, error) {
+						// Before the storageUsed function is invoked, the deltas must have been committed.
+						// So the encoded slabs must have been metered at this point.
+						var expected uint64
+						if storageFormatV2Enabled {
+							expected = 5
+						} else {
+							expected = 4
+						}
+						assert.Equal(t,
+							expected,
+							meter.getMemory(common.MemoryKindAtreeEncodedSlab),
+						)
+
+						storageUsedInvoked = true
+
+						return 1, nil
+					},
+				}
+
+				config := DefaultTestInterpreterConfig
+				config.StorageFormatV2Enabled = storageFormatV2Enabled
+				runtime := NewTestInterpreterRuntimeWithConfig(config)
+
+				err := runtime.ExecuteTransaction(
+					Script{
+						Source: code,
+					},
+					Context{
+						Interface: runtimeInterface,
+						Location:  common.TransactionLocation{},
+					},
+				)
+
+				require.NoError(t, err)
+				assert.True(t, storageUsedInvoked)
+
+				var expected uint64
+				if storageFormatV2Enabled {
+					expected = 5
+				} else {
+					expected = 4
+				}
+				assert.Equal(t,
+					expected,
+					meter.getMemory(common.MemoryKindAtreeEncodedSlab),
+				)
+			})
+		}
+
+		for _, storageFormatV2Enabled := range []bool{false, true} {
+			test(storageFormatV2Enabled)
+		}
 	})
 }
 
@@ -1036,143 +1105,226 @@ func TestRuntimeMeterEncoding(t *testing.T) {
 
 		t.Parallel()
 
-		config := DefaultTestInterpreterConfig
-		config.AtreeValidationEnabled = false
-		rt := NewTestInterpreterRuntimeWithConfig(config)
+		test := func(storageFormatV2Enabled bool) {
 
-		address := common.MustBytesToAddress([]byte{0x1})
-		storage := NewTestLedger(nil, nil)
-		meter := newTestMemoryGauge()
+			name := fmt.Sprintf(
+				"storage format V2 enabled: %v",
+				storageFormatV2Enabled,
+			)
 
-		runtimeInterface := &TestRuntimeInterface{
-			Storage: storage,
-			OnGetSigningAccounts: func() ([]Address, error) {
-				return []Address{address}, nil
-			},
-			OnMeterMemory: meter.MeterMemory,
-		}
+			t.Run(name, func(t *testing.T) {
+				t.Parallel()
 
-		text := "A quick brown fox jumps over the lazy dog"
+				config := DefaultTestInterpreterConfig
+				config.AtreeValidationEnabled = false
+				config.StorageFormatV2Enabled = storageFormatV2Enabled
+				rt := NewTestInterpreterRuntimeWithConfig(config)
 
-		err := rt.ExecuteTransaction(
-			Script{
-				Source: []byte(fmt.Sprintf(`
-                transaction() {
-                    prepare(acc: auth(Storage) &Account) {
-                        var s = "%s"
-                        acc.storage.save(s, to:/storage/some_path)
-                    }
-                }`,
-					text,
-				)),
-			},
-			Context{
-				Interface: runtimeInterface,
-				Location:  common.TransactionLocation{},
-			},
-		)
+				address := common.MustBytesToAddress([]byte{0x1})
+				storage := NewTestLedger(nil, nil)
+				meter := newTestMemoryGauge()
 
-		require.NoError(t, err)
-		assert.Equal(t, 75, int(meter.getMemory(common.MemoryKindBytes)))
+				runtimeInterface := &TestRuntimeInterface{
+					Storage: storage,
+					OnGetSigningAccounts: func() ([]Address, error) {
+						return []Address{address}, nil
+					},
+					OnMeterMemory: meter.MeterMemory,
+				}
+
+				text := "A quick brown fox jumps over the lazy dog"
+
+				err := rt.ExecuteTransaction(
+					Script{
+						Source: []byte(fmt.Sprintf(`
+                            transaction() {
+                                prepare(acc: auth(Storage) &Account) {
+                                    var s = "%s"
+                                    acc.storage.save(s, to:/storage/some_path)
+                                }
+                            }`,
+							text,
+						)),
+					},
+					Context{
+						Interface: runtimeInterface,
+						Location:  common.TransactionLocation{},
+					},
+				)
+
+				require.NoError(t, err)
+
+				var expected uint64
+				if storageFormatV2Enabled {
+					expected = 107
+				} else {
+					expected = 75
+				}
+				assert.Equal(t,
+					expected,
+					meter.getMemory(common.MemoryKindBytes),
+				)
+			})
+		}
+
+		for _, storageFormatV2Enabled := range []bool{false, true} {
+			test(storageFormatV2Enabled)
+		}
 	})
 
 	t.Run("string in loop", func(t *testing.T) {
 
 		t.Parallel()
 
-		config := DefaultTestInterpreterConfig
-		config.AtreeValidationEnabled = false
-		rt := NewTestInterpreterRuntimeWithConfig(config)
+		test := func(storageFormatV2Enabled bool) {
 
-		address := common.MustBytesToAddress([]byte{0x1})
-		storage := NewTestLedger(nil, nil)
-		meter := newTestMemoryGauge()
+			name := fmt.Sprintf(
+				"storage format V2 enabled: %v",
+				storageFormatV2Enabled,
+			)
 
-		runtimeInterface := &TestRuntimeInterface{
-			Storage: storage,
-			OnGetSigningAccounts: func() ([]Address, error) {
-				return []Address{address}, nil
-			},
-			OnMeterMemory: meter.MeterMemory,
-		}
+			t.Run(name, func(t *testing.T) {
+				t.Parallel()
 
-		text := "A quick brown fox jumps over the lazy dog"
+				config := DefaultTestInterpreterConfig
+				config.AtreeValidationEnabled = false
+				config.StorageFormatV2Enabled = storageFormatV2Enabled
+				rt := NewTestInterpreterRuntimeWithConfig(config)
 
-		err := rt.ExecuteTransaction(
-			Script{
-				Source: []byte(fmt.Sprintf(`
-                transaction() {
-                    prepare(acc: auth(Storage) &Account) {
-                        var i = 0
-                        var s = "%s"
-                        while i<1000 {
-                            let path = StoragePath(identifier: "i".concat(i.toString()))!
-                            acc.storage.save(s, to: path)
-                            i=i+1
-                        }
-                    }
-                }`,
-					text,
-				)),
-			},
-			Context{
-				Interface: runtimeInterface,
-				Location:  common.TransactionLocation{},
-			},
-		)
+				address := common.MustBytesToAddress([]byte{0x1})
+				storage := NewTestLedger(nil, nil)
+				meter := newTestMemoryGauge()
 
-		require.NoError(t, err)
-		assert.Equal(t, 61455, int(meter.getMemory(common.MemoryKindBytes)))
+				runtimeInterface := &TestRuntimeInterface{
+					Storage: storage,
+					OnGetSigningAccounts: func() ([]Address, error) {
+						return []Address{address}, nil
+					},
+					OnMeterMemory: meter.MeterMemory,
+				}
+
+				text := "A quick brown fox jumps over the lazy dog"
+
+				err := rt.ExecuteTransaction(
+					Script{
+						Source: []byte(fmt.Sprintf(`
+                            transaction() {
+                                prepare(acc: auth(Storage) &Account) {
+                                    var i = 0
+                                    var s = "%s"
+                                    while i<1000 {
+                                        let path = StoragePath(identifier: "i".concat(i.toString()))!
+                                        acc.storage.save(s, to: path)
+                                        i=i+1
+                                    }
+                                }
+                            }`,
+							text,
+						)),
+					},
+					Context{
+						Interface: runtimeInterface,
+						Location:  common.TransactionLocation{},
+					},
+				)
+
+				require.NoError(t, err)
+
+				var expected uint64
+				if storageFormatV2Enabled {
+					expected = 61494
+				} else {
+					expected = 61455
+				}
+				assert.Equal(t,
+					expected,
+					meter.getMemory(common.MemoryKindBytes),
+				)
+			})
+		}
+
+		for _, storageFormatV2Enabled := range []bool{false, true} {
+			test(storageFormatV2Enabled)
+		}
 	})
 
 	t.Run("composite", func(t *testing.T) {
 
 		t.Parallel()
 
-		config := DefaultTestInterpreterConfig
-		config.AtreeValidationEnabled = false
-		rt := NewTestInterpreterRuntimeWithConfig(config)
+		test := func(storageFormatV2Enabled bool) {
 
-		address := common.MustBytesToAddress([]byte{0x1})
-		storage := NewTestLedger(nil, nil)
-		meter := newTestMemoryGauge()
+			name := fmt.Sprintf(
+				"storage format V2 enabled: %v",
+				storageFormatV2Enabled,
+			)
 
-		runtimeInterface := &TestRuntimeInterface{
-			Storage: storage,
-			OnGetSigningAccounts: func() ([]Address, error) {
-				return []Address{address}, nil
-			},
-			OnMeterMemory: meter.MeterMemory,
-		}
+			t.Run(name, func(t *testing.T) {
+				t.Parallel()
 
-		_, err := rt.ExecuteScript(
-			Script{
-				Source: []byte(`
-                access(all) fun main() {
-                    let acc = getAuthAccount<auth(Storage) &Account>(0x02)
-                    var i = 0
-                    var f = Foo()
-                    while i<1000 {
-                        let path = StoragePath(identifier: "i".concat(i.toString()))!
-                        acc.storage.save(f, to: path)
-                        i=i+1
-                    }
-                }
+				config := DefaultTestInterpreterConfig
+				config.AtreeValidationEnabled = false
+				config.StorageFormatV2Enabled = storageFormatV2Enabled
+				rt := NewTestInterpreterRuntimeWithConfig(config)
 
-                access(all) struct Foo {
-                    access(self) var id: Int
-                    init() {
-                        self.id = 123456789
-                    }
-                }`),
-			},
-			Context{
-				Interface: runtimeInterface,
-				Location:  common.ScriptLocation{},
-			},
-		)
+				address := common.MustBytesToAddress([]byte{0x1})
+				storage := NewTestLedger(nil, nil)
+				meter := newTestMemoryGauge()
 
-		require.NoError(t, err)
-		assert.Equal(t, 58323, int(meter.getMemory(common.MemoryKindBytes)))
+				runtimeInterface := &TestRuntimeInterface{
+					Storage: storage,
+					OnGetSigningAccounts: func() ([]Address, error) {
+						return []Address{address}, nil
+					},
+					OnMeterMemory: meter.MeterMemory,
+				}
+
+				_, err := rt.ExecuteScript(
+					Script{
+						Source: []byte(`
+                            access(all) fun main() {
+                                let acc = getAuthAccount<auth(Storage) &Account>(0x02)
+                                var i = 0
+                                var f = Foo()
+                                while i<1000 {
+                                    let path = StoragePath(identifier: "i".concat(i.toString()))!
+                                    acc.storage.save(f, to: path)
+                                    i=i+1
+                                }
+                            }
+
+                            access(all) struct Foo {
+                                access(self) var id: Int
+                                init() {
+                                    self.id = 123456789
+                                }
+                            }
+                        `),
+					},
+					Context{
+						Interface: runtimeInterface,
+						Location:  common.ScriptLocation{},
+					},
+				)
+
+				require.NoError(t, err)
+
+				var expected uint64
+				if storageFormatV2Enabled {
+					expected = 58362
+				} else {
+					expected = 58323
+				}
+
+				assert.Equal(t,
+					expected,
+					meter.getMemory(common.MemoryKindBytes),
+				)
+			})
+		}
+
+		for _, storageFormatV2Enabled := range []bool{false, true} {
+			test(storageFormatV2Enabled)
+		}
 	})
 }
diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go
index e293631904..44d1e5ed56 100644
--- a/runtime/runtime_test.go
+++ b/runtime/runtime_test.go
@@ -5683,102 +5683,159 @@ func TestRuntimeContractWriteback(t *testing.T) {
 
 	t.Parallel()
 
-	runtime := NewTestInterpreterRuntime()
-
 	addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE})
 
-	contract := []byte(`
-      access(all) contract Test {
+	test := func(
+		storageFormatV2Enabled bool,
+		expectedDeployTxWrites []ownerKeyPair,
+		expectedWriteTxWrites []ownerKeyPair,
+	) {
 
-          access(all) var test: Int
+		name := fmt.Sprintf(
+			"storage format V2 enabled: %v",
+			storageFormatV2Enabled,
+		)
 
-          init() {
-              self.test = 1
-          }
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
 
-		  access(all) fun setTest(_ test: Int) {
-			self.test = test
-		  }
-      }
-    `)
+			config := DefaultTestInterpreterConfig
+			config.StorageFormatV2Enabled = storageFormatV2Enabled
+			runtime := NewTestInterpreterRuntimeWithConfig(config)
 
-	deploy := DeploymentTransaction("Test", contract)
+			contract := []byte(`
+              access(all) contract Test {
 
-	readTx := []byte(`
-      import Test from 0xCADE
+                  access(all) var test: Int
 
-       transaction {
+                  init() {
+                      self.test = 1
+                  }
 
-          prepare(signer: &Account) {
-              log(Test.test)
-          }
-       }
-    `)
+                  access(all) fun setTest(_ test: Int) {
+                    self.test = test
+                  }
+              }
+            `)
 
-	writeTx := []byte(`
-      import Test from 0xCADE
+			deploy := DeploymentTransaction("Test", contract)
 
-       transaction {
+			readTx := []byte(`
+              import Test from 0xCADE
 
-          prepare(signer: &Account) {
-              Test.setTest(2)
-          }
-       }
-    `)
+               transaction {
 
-	var accountCode []byte
-	var events []cadence.Event
-	var loggedMessages []string
-	var writes []ownerKeyPair
+                  prepare(signer: &Account) {
+                      log(Test.test)
+                  }
+               }
+            `)
 
-	onWrite := func(owner, key, value []byte) {
-		writes = append(writes, ownerKeyPair{
-			owner,
-			key,
-		})
-	}
+			writeTx := []byte(`
+              import Test from 0xCADE
 
-	runtimeInterface := &TestRuntimeInterface{
-		OnGetCode: func(_ Location) (bytes []byte, err error) {
-			return accountCode, nil
-		},
-		Storage: NewTestLedger(nil, onWrite),
-		OnGetSigningAccounts: func() ([]Address, error) {
-			return []Address{Address(addressValue)}, nil
-		},
-		OnResolveLocation: NewSingleIdentifierLocationResolver(t),
-		OnGetAccountContractCode: func(_ common.AddressLocation) (code []byte, err error) {
-			return accountCode, nil
-		},
-		OnUpdateAccountContractCode: func(_ common.AddressLocation, code []byte) (err error) {
-			accountCode = code
-			return nil
-		},
-		OnEmitEvent: func(event cadence.Event) error {
-			events = append(events, event)
-			return nil
-		},
-		OnProgramLog: func(message string) {
-			loggedMessages = append(loggedMessages, message)
-		},
-	}
+               transaction {
 
-	nextTransactionLocation := NewTransactionLocationGenerator()
+                  prepare(signer: &Account) {
+                      Test.setTest(2)
+                  }
+               }
+            `)
 
-	err := runtime.ExecuteTransaction(
-		Script{
-			Source: deploy,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
-		},
-	)
-	require.NoError(t, err)
+			var accountCode []byte
+			var events []cadence.Event
+			var loggedMessages []string
+			var writes []ownerKeyPair
 
-	assert.NotNil(t, accountCode)
+			onWrite := func(owner, key, value []byte) {
+				writes = append(writes, ownerKeyPair{
+					owner,
+					key,
+				})
+			}
 
-	assert.Equal(t,
+			runtimeInterface := &TestRuntimeInterface{
+				OnGetCode: func(_ Location) (bytes []byte, err error) {
+					return accountCode, nil
+				},
+				Storage: NewTestLedger(nil, onWrite),
+				OnGetSigningAccounts: func() ([]Address, error) {
+					return []Address{Address(addressValue)}, nil
+				},
+				OnResolveLocation: NewSingleIdentifierLocationResolver(t),
+				OnGetAccountContractCode: func(_ common.AddressLocation) (code []byte, err error) {
+					return accountCode, nil
+				},
+				OnUpdateAccountContractCode: func(_ common.AddressLocation, code []byte) (err error) {
+					accountCode = code
+					return nil
+				},
+				OnEmitEvent: func(event cadence.Event) error {
+					events = append(events, event)
+					return nil
+				},
+				OnProgramLog: func(message string) {
+					loggedMessages = append(loggedMessages, message)
+				},
+			}
+
+			nextTransactionLocation := NewTransactionLocationGenerator()
+
+			err := runtime.ExecuteTransaction(
+				Script{
+					Source: deploy,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
+
+			assert.NotNil(t, accountCode)
+
+			assert.Equal(t,
+				expectedDeployTxWrites,
+				writes,
+			)
+
+			writes = nil
+
+			err = runtime.ExecuteTransaction(
+				Script{
+					Source: readTx,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
+
+			assert.Empty(t, writes)
+
+			writes = nil
+
+			err = runtime.ExecuteTransaction(
+				Script{
+					Source: writeTx,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
+
+			assert.Equal(t,
+				expectedWriteTxWrites,
+				writes,
+			)
+
+		})
+	}
+
+	test(false,
 		[]ownerKeyPair{
 			// storage index to contract domain storage map
 			{
@@ -5796,47 +5853,55 @@ func TestRuntimeContractWriteback(t *testing.T) {
 				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
 			},
 		},
-		writes,
-	)
 
-	writes = nil
-
-	err = runtime.ExecuteTransaction(
-		Script{
-			Source: readTx,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
+		[]ownerKeyPair{
+			// Storage map is modified because contract value is inlined in contract storage map.
+			// NOTE: contract value slab doesn't exist.
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
+			},
 		},
 	)
-	require.NoError(t, err)
-
-	assert.Empty(t, writes)
 
-	writes = nil
+	test(
+		true,
 
-	err = runtime.ExecuteTransaction(
-		Script{
-			Source: writeTx,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
+		[]ownerKeyPair{
+			// storage index to account storage map
+			{
+				addressValue[:],
+				[]byte(AccountStorageKey),
+			},
+			// contract value
+				// NOTE: contract value slab is empty because it is inlined in contract domain storage map
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+			},
+			// contract domain storage map
+			// NOTE: contract domain storage map is empty because it is inlined in account storage map
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
+			},
+			// account storage map
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
+			},
 		},
-	)
-	require.NoError(t, err)
 
-	assert.Equal(t,
 		[]ownerKeyPair{
-			// Storage map is modified because contract value is inlined in contract storage map.
-			// NOTE: contract value slab doesn't exist.
+			// Account storage map is modified because:
+			// - contract value is inlined in contract storage map, and
+			// - contract storage map is inlined in account storage map.
+			// NOTE: both contract storage map slab and contract value slab don't exist.
 			{
 				addressValue[:],
 				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
 			},
 		},
-		writes,
 	)
 }
 
@@ -5844,88 +5909,195 @@ func TestRuntimeStorageWriteback(t *testing.T) {
 
 	t.Parallel()
 
-	runtime := NewTestInterpreterRuntime()
-
 	addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE})
 
-	contract := []byte(`
-      access(all) contract Test {
+	test := func(
+		storageFormatV2Enabled bool,
+		expectedDeployTxWrites []ownerKeyPair,
+		expectedSaveToStorageTxWrites []ownerKeyPair,
+		expectedModifyStorageTxWrites []ownerKeyPair,
+	) {
 
-          access(all) resource R {
+		name := fmt.Sprintf(
+			"storage format V2 enabled: %v",
+			storageFormatV2Enabled,
+		)
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			config := DefaultTestInterpreterConfig
+			config.StorageFormatV2Enabled = storageFormatV2Enabled
+			runtime := NewTestInterpreterRuntimeWithConfig(config)
+
+			contract := []byte(`
+              access(all) contract Test {
+
+                  access(all) resource R {
+
+                      access(all) var test: Int
+
+                      init() {
+                          self.test = 1
+                      }
+
+                      access(all) fun setTest(_ test: Int) {
+                        self.test = test
+                      }
+                  }
 
-              access(all) var test: Int
 
-              init() {
-                  self.test = 1
+                  access(all) fun createR(): @R {
+                      return <-create R()
+                  }
               }
+            `)
 
-			  access(all) fun setTest(_ test: Int) {
-				self.test = test
-			  }
-          }
+			deploy := DeploymentTransaction("Test", contract)
 
+			var accountCode []byte
+			var events []cadence.Event
+			var loggedMessages []string
+			var writes []ownerKeyPair
 
-          access(all) fun createR(): @R {
-              return <-create R()
-          }
-      }
-    `)
+			onWrite := func(owner, key, _ []byte) {
+				writes = append(writes, ownerKeyPair{
+					owner,
+					key,
+				})
+			}
 
-	deploy := DeploymentTransaction("Test", contract)
+			runtimeInterface := &TestRuntimeInterface{
+				OnGetCode: func(_ Location) (bytes []byte, err error) {
+					return accountCode, nil
+				},
+				Storage: NewTestLedger(nil, onWrite),
+				OnGetSigningAccounts: func() ([]Address, error) {
+					return []Address{Address(addressValue)}, nil
+				},
+				OnResolveLocation: NewSingleIdentifierLocationResolver(t),
+				OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
+					return accountCode, nil
+				},
+				OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
+					accountCode = code
+					return nil
+				},
+				OnEmitEvent: func(event cadence.Event) error {
+					events = append(events, event)
+					return nil
+				},
+				OnProgramLog: func(message string) {
+					loggedMessages = append(loggedMessages, message)
+				},
+			}
 
-	var accountCode []byte
-	var events []cadence.Event
-	var loggedMessages []string
-	var writes []ownerKeyPair
+			nextTransactionLocation := NewTransactionLocationGenerator()
 
-	onWrite := func(owner, key, _ []byte) {
-		writes = append(writes, ownerKeyPair{
-			owner,
-			key,
-		})
-	}
+			err := runtime.ExecuteTransaction(
+				Script{
+					Source: deploy,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
 
-	runtimeInterface := &TestRuntimeInterface{
-		OnGetCode: func(_ Location) (bytes []byte, err error) {
-			return accountCode, nil
-		},
-		Storage: NewTestLedger(nil, onWrite),
-		OnGetSigningAccounts: func() ([]Address, error) {
-			return []Address{Address(addressValue)}, nil
-		},
-		OnResolveLocation: NewSingleIdentifierLocationResolver(t),
-		OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
-			return accountCode, nil
-		},
-		OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
-			accountCode = code
-			return nil
-		},
-		OnEmitEvent: func(event cadence.Event) error {
-			events = append(events, event)
-			return nil
-		},
-		OnProgramLog: func(message string) {
-			loggedMessages = append(loggedMessages, message)
-		},
-	}
+			assert.NotNil(t, accountCode)
 
-	nextTransactionLocation := NewTransactionLocationGenerator()
+			assert.Equal(t,
+				expectedDeployTxWrites,
+				writes,
+			)
 
-	err := runtime.ExecuteTransaction(
-		Script{
-			Source: deploy,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
-		},
-	)
-	require.NoError(t, err)
+			writes = nil
 
-	assert.NotNil(t, accountCode)
+			err = runtime.ExecuteTransaction(
+				Script{
+					Source: []byte(`
+                       import Test from 0xCADE
 
-	assert.Equal(t,
+                        transaction {
+
+                           prepare(signer: auth(Storage) &Account) {
+                               signer.storage.save(<-Test.createR(), to: /storage/r)
+                           }
+                        }
+                    `),
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
+
+			assert.Equal(t,
+				expectedSaveToStorageTxWrites,
+				writes,
+			)
+
+			readTx := []byte(`
+              import Test from 0xCADE
+
+              transaction {
+
+                 prepare(signer: auth(Storage) &Account) {
+                     log(signer.storage.borrow<&Test.R>(from: /storage/r)!.test)
+                 }
+              }
+            `)
+
+			writes = nil
+
+			err = runtime.ExecuteTransaction(
+				Script{
+					Source: readTx,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
+
+			assert.Empty(t, writes)
+
+			writeTx := []byte(`
+              import Test from 0xCADE
+
+              transaction {
+
+                 prepare(signer: auth(Storage) &Account) {
+                     let r = signer.storage.borrow<&Test.R>(from: /storage/r)!
+                     r.setTest(2)
+                 }
+              }
+            `)
+
+			writes = nil
+
+			err = runtime.ExecuteTransaction(
+				Script{
+					Source: writeTx,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
+
+			assert.Equal(t,
+				expectedModifyStorageTxWrites,
+				writes,
+			)
+		})
+	}
+
+	test(
+		false,
 		[]ownerKeyPair{
 			// storage index to contract domain storage map
 			{
@@ -5944,32 +6116,6 @@ func TestRuntimeStorageWriteback(t *testing.T) {
 				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
 			},
 		},
-		writes,
-	)
-
-	writes = nil
-
-	err = runtime.ExecuteTransaction(
-		Script{
-			Source: []byte(`
-              import Test from 0xCADE
-
-               transaction {
-
-                  prepare(signer: auth(Storage) &Account) {
-                      signer.storage.save(<-Test.createR(), to: /storage/r)
-                  }
-               }
-            `),
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
-		},
-	)
-	require.NoError(t, err)
-
-	assert.Equal(t,
 		[]ownerKeyPair{
 			// storage index to storage domain storage map
 			{
@@ -5989,70 +6135,73 @@ func TestRuntimeStorageWriteback(t *testing.T) {
 				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
 			},
 		},
-		writes,
-	)
-
-	readTx := []byte(`
-     import Test from 0xCADE
-
-      transaction {
-
-         prepare(signer: auth(Storage) &Account) {
-             log(signer.storage.borrow<&Test.R>(from: /storage/r)!.test)
-         }
-      }
-    `)
-
-	writes = nil
-
-	err = runtime.ExecuteTransaction(
-		Script{
-			Source: readTx,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
+		[]ownerKeyPair{
+			// Storage map is modified because resource value is inlined in storage map
+			// NOTE: resource value slab is empty.
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
+			},
 		},
 	)
-	require.NoError(t, err)
-
-	assert.Empty(t, writes)
-
-	writeTx := []byte(`
-     import Test from 0xCADE
-
-      transaction {
-
-         prepare(signer: auth(Storage) &Account) {
-             let r = signer.storage.borrow<&Test.R>(from: /storage/r)!
-             r.setTest(2)
-         }
-      }
-    `)
 
-	writes = nil
-
-	err = runtime.ExecuteTransaction(
-		Script{
-			Source: writeTx,
+	test(
+		true,
+		[]ownerKeyPair{
+			// storage index to account storage map
+			{
+				addressValue[:],
+				[]byte(AccountStorageKey),
+			},
+			// contract value
+			// NOTE: contract value slab is empty because it is inlined in contract domain storage map
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+			},
+			// contract domain storage map
+			// NOTE: contract domain storage map is empty because it is inlined in account storage map
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
+			},
+			// account storage map
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3},
+			},
 		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
+
+		[]ownerKeyPair{
+			// account storage map
+			// NOTE: account storage map is updated with new storage domain storage map (inlined).
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
+			},
+			// resource value
+			// NOTE: resource value slab is empty because it is inlined in storage domain storage map
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
+			},
+			// storage domain storage map
+			// NOTE: storage domain storage map is empty because it is inlined in account storage map.
+			{
+				addressValue[:],
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5},
+			},
 		},
-	)
-	require.NoError(t, err)
 
-	assert.Equal(t,
 		[]ownerKeyPair{
-			// Storage map is modified because resource value is inlined in storage map
+			// Account storage map is modified because resource value is inlined in storage map,
+			// and storage map is inlined in account storage map.
 			// NOTE: resource value slab is empty.
 			{
 				addressValue[:],
-				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4},
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
 			},
 		},
-		writes,
 	)
 }
 
@@ -7506,11 +7655,12 @@ func TestRuntimeComputationMetring(t *testing.T) {
 	t.Parallel()
 
 	type test struct {
-		name      string
-		code      string
-		ok        bool
-		hits      uint
-		intensity uint
+		name        string
+		code        string
+		ok          bool
+		hits        uint
+		v1Intensity uint
+		v2Intensity uint
 	}
 
 	compLimit := uint(6)
@@ -7519,116 +7669,143 @@ func TestRuntimeComputationMetring(t *testing.T) {
 		{
 			name: "Infinite while loop",
 			code: `
-          while true {}
-        `,
-			ok:        false,
-			hits:      compLimit,
-			intensity: 6,
+              while true {}
+            `,
+			ok:          false,
+			hits:        compLimit,
+			v1Intensity: 6,
+			v2Intensity: 6,
 		},
 		{
 			name: "Limited while loop",
 			code: `
-          var i = 0
-          while i < 5 {
-              i = i + 1
-          }
-        `,
-			ok:        false,
-			hits:      compLimit,
-			intensity: 6,
+              var i = 0
+              while i < 5 {
+                  i = i + 1
+              }
+            `,
+			ok:          false,
+			hits:        compLimit,
+			v1Intensity: 6,
+			v2Intensity: 6,
 		},
 		{
 			name: "statement + createArray + transferArray + too many for-in loop iterations",
 			code: `
-          for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] {}
-        `,
-			ok:        false,
-			hits:      compLimit,
-			intensity: 6,
+              for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] {}
+            `,
+			ok:          false,
+			hits:        compLimit,
+			v1Intensity: 6,
+			v2Intensity: 6,
 		},
 		{
 			name: "statement + createArray + transferArray + two for-in loop iterations",
 			code: `
-          for i in [1, 2] {}
-        `,
-			ok:        true,
-			hits:      4,
-			intensity: 4,
+              for i in [1, 2] {}
+            `,
+			ok:          true,
+			hits:        4,
+			v1Intensity: 4,
+			v2Intensity: 4,
 		},
 		{
 			name: "statement + functionInvocation + encoding",
 			code: `
-          acc.storage.save("A quick brown fox jumps over the lazy dog", to:/storage/some_path)
-        `,
-			ok:        true,
-			hits:      3,
-			intensity: 76,
+              acc.storage.save("A quick brown fox jumps over the lazy dog", to:/storage/some_path)
+            `,
+			ok:          true,
+			hits:        3,
+			v1Intensity: 76,
+			v2Intensity: 108,
 		},
 	}
 
-	for _, test := range tests {
+	for _, testCase := range tests {
 
-		t.Run(test.name, func(t *testing.T) {
+		t.Run(testCase.name, func(t *testing.T) {
 
-			script := []byte(
-				fmt.Sprintf(
-					`
-                  transaction {
-                      prepare(acc: auth(Storage) &Account) {
-                          %s
-                      }
-                  }
-                `,
-					test.code,
-				),
-			)
+			test := func(storageFormatV2Enabled bool) {
 
-			runtime := NewTestInterpreterRuntime()
+				name := fmt.Sprintf(
+					"storage format V2 enabled: %v",
+					storageFormatV2Enabled,
+				)
+				t.Run(name, func(t *testing.T) {
+					t.Parallel()
 
-			compErr := errors.New("computation exceeded limit")
-			var hits, totalIntensity uint
-			meterComputationFunc := func(kind common.ComputationKind, intensity uint) error {
-				hits++
-				totalIntensity += intensity
-				if hits >= compLimit {
-					return compErr
-				}
-				return nil
-			}
+					script := []byte(
+						fmt.Sprintf(
+							`
+                              transaction {
+                                  prepare(acc: auth(Storage) &Account) {
+                                      %s
+                                  }
+                              }
+                            `,
+							testCase.code,
+						),
+					)
+
+					config := DefaultTestInterpreterConfig
+					config.StorageFormatV2Enabled = storageFormatV2Enabled
+					runtime := NewTestInterpreterRuntimeWithConfig(config)
+
+					compErr := errors.New("computation exceeded limit")
+					var hits, totalIntensity uint
+					meterComputationFunc := func(kind common.ComputationKind, intensity uint) error {
+						hits++
+						totalIntensity += intensity
+						if hits >= compLimit {
+							return compErr
+						}
+						return nil
+					}
 
-			address := common.MustBytesToAddress([]byte{0x1})
+					address := common.MustBytesToAddress([]byte{0x1})
 
-			runtimeInterface := &TestRuntimeInterface{
-				Storage: NewTestLedger(nil, nil),
-				OnGetSigningAccounts: func() ([]Address, error) {
-					return []Address{address}, nil
-				},
-				OnMeterComputation: meterComputationFunc,
-			}
+					runtimeInterface := &TestRuntimeInterface{
+						Storage: NewTestLedger(nil, nil),
+						OnGetSigningAccounts: func() ([]Address, error) {
+							return []Address{address}, nil
+						},
+						OnMeterComputation: meterComputationFunc,
+					}
 
-			nextTransactionLocation := NewTransactionLocationGenerator()
+					nextTransactionLocation := NewTransactionLocationGenerator()
 
-			err := runtime.ExecuteTransaction(
-				Script{
-					Source: script,
-				},
-				Context{
-					Interface: runtimeInterface,
-					Location:  nextTransactionLocation(),
-				},
-			)
-			if test.ok {
-				require.NoError(t, err)
-			} else {
-				RequireError(t, err)
+					err := runtime.ExecuteTransaction(
+						Script{
+							Source: script,
+						},
+						Context{
+							Interface: runtimeInterface,
+							Location:  nextTransactionLocation(),
+						},
+					)
+					if testCase.ok {
+						require.NoError(t, err)
+					} else {
+						RequireError(t, err)
+
+						var executionErr Error
+						require.ErrorAs(t, err, &executionErr)
+						require.ErrorAs(t, err.(Error).Unwrap(), &compErr)
+					}
+
+					assert.Equal(t, testCase.hits, hits)
 
-				var executionErr Error
-				require.ErrorAs(t, err, &executionErr)
-				require.ErrorAs(t, err.(Error).Unwrap(), &compErr)
+					if storageFormatV2Enabled {
+						assert.Equal(t, testCase.v2Intensity, totalIntensity)
+					} else {
+						assert.Equal(t, testCase.v1Intensity, totalIntensity)
+					}
+				})
 			}
 
-			assert.Equal(t, test.hits, hits)
-			assert.Equal(t, test.intensity, totalIntensity)
+			for _, storageFormatV2Enabled := range []bool{false, true} {
+				test(storageFormatV2Enabled)
+			}
 		})
 	}
 }
diff --git a/runtime/script_executor.go b/runtime/script_executor.go
index ca07c4cb00..8a51088a3d 100644
--- a/runtime/script_executor.go
+++ b/runtime/script_executor.go
@@ -107,7 +107,13 @@ func (executor *interpreterScriptExecutor) preprocess() (err error) {
 
 	runtimeInterface := context.Interface
 
-	storage := NewStorage(runtimeInterface, runtimeInterface)
+	storage := NewStorage(
+		runtimeInterface,
+		runtimeInterface,
+		StorageConfig{
+			StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled,
+		},
+	)
 	executor.storage = storage
 
 	environment := context.Environment
diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go
index 4e3749a944..b2cd5cc495 100644
--- a/runtime/sharedstate_test.go
+++ b/runtime/sharedstate_test.go
@@ -19,6 +19,7 @@
 package runtime_test
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
@@ -35,195 +36,291 @@ func TestRuntimeSharedState(t *testing.T) {
 
 	t.Parallel()
 
-	runtime := NewTestInterpreterRuntime()
-
 	signerAddress := common.MustBytesToAddress([]byte{0x1})
 
-	deploy1 := DeploymentTransaction("C1", []byte(`
-        access(all) contract C1 {
-            access(all) fun hello() {
-                log("Hello from C1!")
-            }
-        }
-    `))
+	test := func(
+		storageFormatV2Enabled bool,
+		expectedReads []ownerKeyPair,
+	) {
 
-	deploy2 := DeploymentTransaction("C2", []byte(`
-        access(all) contract C2 {
-            access(all) fun hello() {
-                log("Hello from C2!")
-            }
-        }
-    `))
+		name := fmt.Sprintf(
+			"storage format V2 enabled: %v",
+			storageFormatV2Enabled,
+		)
 
-	accountCodes := map[common.Location][]byte{}
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
 
-	var events []cadence.Event
-	var loggedMessages []string
+			config := DefaultTestInterpreterConfig
+			config.StorageFormatV2Enabled = storageFormatV2Enabled
+			config.AtreeValidationEnabled = false
+			runtime := NewTestInterpreterRuntimeWithConfig(config)
 
-	var interpreterState *interpreter.SharedState
+			deploy1 := DeploymentTransaction("C1", []byte(`
+                access(all) contract C1 {
+                    access(all) fun hello() {
+                        log("Hello from C1!")
+                    }
+                }
+            `))
 
-	var ledgerReads []ownerKeyPair
+			deploy2 := DeploymentTransaction("C2", []byte(`
+                access(all) contract C2 {
+                    access(all) fun hello() {
+                        log("Hello from C2!")
+                    }
+                }
+            `))
 
-	ledger := NewTestLedger(
-		func(owner, key, value []byte) {
-			ledgerReads = append(
-				ledgerReads,
-				ownerKeyPair{
-					owner: owner,
-					key:   key,
-				},
-			)
-		},
-		nil,
-	)
+			accountCodes := map[common.Location][]byte{}
 
-	runtimeInterface := &TestRuntimeInterface{
-		Storage: ledger,
-		OnGetSigningAccounts: func() ([]Address, error) {
-			return []Address{signerAddress}, nil
-		},
-		OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
-			accountCodes[location] = code
-			return nil
-		},
-		OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
-			code = accountCodes[location]
-			return code, nil
-		},
-		OnRemoveAccountContractCode: func(location common.AddressLocation) error {
-			delete(accountCodes, location)
-			return nil
-		},
-		OnResolveLocation: MultipleIdentifierLocationResolver,
-		OnProgramLog: func(message string) {
-			loggedMessages = append(loggedMessages, message)
-		},
-		OnEmitEvent: func(event cadence.Event) error {
-			events = append(events, event)
-			return nil
-		},
-		OnSetInterpreterSharedState: func(state *interpreter.SharedState) {
-			interpreterState = state
-		},
-		OnGetInterpreterSharedState: func() *interpreter.SharedState {
-			return interpreterState
-		},
-	}
+			var events []cadence.Event
+			var loggedMessages []string
 
-	environment := NewBaseInterpreterEnvironment(Config{})
+			var interpreterState *interpreter.SharedState
 
-	nextTransactionLocation := NewTransactionLocationGenerator()
+			var ledgerReads []ownerKeyPair
 
-	// Deploy contracts
+			ledger := NewTestLedger(
+				func(owner, key, value []byte) {
+					ledgerReads = append(
+						ledgerReads,
+						ownerKeyPair{
+							owner: owner,
+							key:   key,
+						},
+					)
+				},
+				nil,
+			)
 
-	for _, source := range [][]byte{
-		deploy1,
-		deploy2,
-	} {
-		err := runtime.ExecuteTransaction(
-			Script{
-				Source: source,
-			},
-			Context{
-				Interface:   runtimeInterface,
-				Location:    nextTransactionLocation(),
-				Environment: environment,
-			},
-		)
-		require.NoError(t, err)
-	}
+			runtimeInterface := &TestRuntimeInterface{
+				Storage: ledger,
+				OnGetSigningAccounts: func() ([]Address, error) {
+					return []Address{signerAddress}, nil
+				},
+				OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
+					accountCodes[location] = code
+					return nil
+				},
+				OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
+					code = accountCodes[location]
+					return code, nil
+				},
+				OnRemoveAccountContractCode: func(location common.AddressLocation) error {
+					delete(accountCodes, location)
+					return nil
+				},
+				OnResolveLocation: MultipleIdentifierLocationResolver,
+				OnProgramLog: func(message string) {
+					loggedMessages = append(loggedMessages, message)
+				},
+				OnEmitEvent: func(event cadence.Event) error {
+					events = append(events, event)
+					return nil
+				},
+				OnSetInterpreterSharedState: func(state *interpreter.SharedState) {
+					interpreterState = state
+				},
+				OnGetInterpreterSharedState: func() *interpreter.SharedState {
+					return interpreterState
+				},
+			}
+
+			environment := NewBaseInterpreterEnvironment(config)
+
+			nextTransactionLocation := NewTransactionLocationGenerator()
+
+			// Deploy contracts
+
+			for _, source := range [][]byte{
+				deploy1,
+				deploy2,
+			} {
+				err := runtime.ExecuteTransaction(
+					Script{
+						Source: source,
+					},
+					Context{
+						Interface:   runtimeInterface,
+						Location:    nextTransactionLocation(),
+						Environment: environment,
+					},
+				)
+				require.NoError(t, err)
+			}
+
+			assert.NotEmpty(t, accountCodes)
+
+			// Call C1.hello using transaction
+
+			loggedMessages = nil
+
+			err := runtime.ExecuteTransaction(
+				Script{
+					Source: []byte(`
+                        import C1 from 0x1
+
+                        transaction {
+                            prepare(signer: &Account) {
+                                C1.hello()
+                            }
+                        }
+                    `),
+					Arguments: nil,
+				},
+				Context{
+					Interface:   runtimeInterface,
+					Location:    nextTransactionLocation(),
+					Environment: environment,
+				},
+			)
+			require.NoError(t, err)
 
-	assert.NotEmpty(t, accountCodes)
+			assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages)
 
-	// Call C1.hello using transaction
+			// Call C1.hello manually
 
-	loggedMessages = nil
+			loggedMessages = nil
 
-	err := runtime.ExecuteTransaction(
-		Script{
-			Source: []byte(`
-                import C1 from 0x1
+			_, err = runtime.InvokeContractFunction(
+				common.AddressLocation{
+					Address: signerAddress,
+					Name:    "C1",
+				},
+				"hello",
+				nil,
+				nil,
+				Context{
+					Interface:   runtimeInterface,
+					Location:    nextTransactionLocation(),
+					Environment: environment,
+				},
+			)
+			require.NoError(t, err)
 
-                transaction {
-                    prepare(signer: &Account) {
-                        C1.hello()
-                    }
-                }
-            `),
-			Arguments: nil,
-		},
-		Context{
-			Interface:   runtimeInterface,
-			Location:    nextTransactionLocation(),
-			Environment: environment,
-		},
-	)
-	require.NoError(t, err)
+			assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages)
 
-	assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages)
+			// Call C2.hello manually
 
-	// Call C1.hello manually
+			loggedMessages = nil
 
-	loggedMessages = nil
+			_, err = runtime.InvokeContractFunction(
+				common.AddressLocation{
+					Address: signerAddress,
+					Name:    "C2",
+				},
+				"hello",
+				nil,
+				nil,
+				Context{
+					Interface:   runtimeInterface,
+					Location:    nextTransactionLocation(),
+					Environment: environment,
+				},
+			)
+			require.NoError(t, err)
 
-	_, err = runtime.InvokeContractFunction(
-		common.AddressLocation{
-			Address: signerAddress,
-			Name:    "C1",
-		},
-		"hello",
-		nil,
-		nil,
-		Context{
-			Interface:   runtimeInterface,
-			Location:    nextTransactionLocation(),
-			Environment: environment,
-		},
-	)
-	require.NoError(t, err)
+			assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages)
 
-	assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages)
+			// Assert shared state was used,
+			// i.e. data was not re-read
 
-	// Call C2.hello manually
+			require.Equal(t,
+				expectedReads,
+				ledgerReads,
+			)
+		})
+	}
 
-	loggedMessages = nil
+	test(
+		false,
 
-	_, err = runtime.InvokeContractFunction(
-		common.AddressLocation{
-			Address: signerAddress,
-			Name:    "C2",
-		},
-		"hello",
-		nil,
-		nil,
-		Context{
-			Interface:   runtimeInterface,
-			Location:    nextTransactionLocation(),
-			Environment: environment,
+		[]ownerKeyPair{
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.StorageDomainContract.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.StorageDomainContract.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
+			},
 		},
 	)
-	require.NoError(t, err)
-
-	assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages)
 
-	// Assert shared state was used,
-	// i.e. data was not re-read
-
-	require.Equal(t,
+	test(
+		true,
 		[]ownerKeyPair{
+			// Read account register to check if it is a migrated account
+			// Read returns no value.
+			{
+				owner: signerAddress[:],
+				key:   []byte(AccountStorageKey),
+			},
+			// Read contract domain register.
+			// Read returns no value.
 			{
 				owner: signerAddress[:],
 				key:   []byte(common.StorageDomainContract.Identifier()),
 			},
+			// Read all available domain registers to check if it is a new account.
+			// Each read returns no value.
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.PathDomainStorage.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.PathDomainPrivate.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.PathDomainPublic.Identifier()),
+			},
 			{
 				owner: signerAddress[:],
 				key:   []byte(common.StorageDomainContract.Identifier()),
 			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.StorageDomainInbox.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.StorageDomainCapabilityController.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.StorageDomainCapabilityControllerTag.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.StorageDomainPathCapability.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(common.StorageDomainAccountCapability.Identifier()),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(AccountStorageKey),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(AccountStorageKey),
+			},
+			{
+				owner: signerAddress[:],
+				key:   []byte(AccountStorageKey),
+			},
 			{
 				owner: signerAddress[:],
 				key:   []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
 			},
 		},
-		ledgerReads,
 	)
 }
diff --git a/runtime/slabindex.go b/runtime/slabindex.go
new file mode 100644
index 0000000000..00178608d0
--- /dev/null
+++ b/runtime/slabindex.go
@@ -0,0 +1,86 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime
+
+import (
+	"github.com/onflow/atree"
+
+	"github.com/onflow/cadence/common"
+	"github.com/onflow/cadence/errors"
+	"github.com/onflow/cadence/interpreter"
+)
+
+// readSlabIndexFromRegister returns register value as atree.SlabIndex.
+// This function returns error if
+// - underlying ledger panics, or
+// - underlying ledger returns error when retrieving ledger value, or
+// - retrieved ledger value is invalid (for atree.SlabIndex).
+func readSlabIndexFromRegister(
+	ledger atree.Ledger,
+	address common.Address,
+	key []byte,
+) (atree.SlabIndex, bool, error) {
+	var data []byte
+	var err error
+	errors.WrapPanic(func() {
+		data, err = ledger.GetValue(address[:], key)
+	})
+	if err != nil {
+		return atree.SlabIndex{}, false, interpreter.WrappedExternalError(err)
+	}
+
+	dataLength := len(data)
+
+	if dataLength == 0 {
+		return atree.SlabIndex{}, false, nil
+	}
+
+	isStorageIndex := dataLength == storageIndexLength
+	if !isStorageIndex {
+		// Invalid data in register
+
+		// TODO: add dedicated error type?
+		return atree.SlabIndex{}, false, errors.NewUnexpectedError(
+			"invalid storage index for storage map of account '%x': expected length %d, got %d",
+			address[:], storageIndexLength, dataLength,
+		)
+	}
+
+	return atree.SlabIndex(data), true, nil
+}
+
+func writeSlabIndexToRegister(
+	ledger atree.Ledger,
+	address common.Address,
+	key []byte,
+	slabIndex atree.SlabIndex,
+) error {
+	var err error
+	errors.WrapPanic(func() {
+		err = ledger.SetValue(
+			address[:],
+			key,
+			slabIndex[:],
+		)
+	})
+	if err != nil {
+		return interpreter.WrappedExternalError(err)
+	}
+	return nil
+}
diff --git a/runtime/storage.go b/runtime/storage.go
index ecab014304..2345ddb8ed 100644
--- a/runtime/storage.go
+++ b/runtime/storage.go
@@ -32,19 +32,55 @@ import (
 	"github.com/onflow/cadence/interpreter"
 )
 
+const (
+	AccountStorageKey = "stored"
+)
+
+type StorageConfig struct {
+	StorageFormatV2Enabled bool
+}
+
+type StorageFormat uint8
+
+const (
+	StorageFormatUnknown StorageFormat = iota
+	StorageFormatV1
+	StorageFormatV2
+)
+
 type Storage struct {
 	*atree.PersistentSlabStorage
-	NewStorageMaps  *orderedmap.OrderedMap[interpreter.StorageDomainKey, atree.SlabIndex]
-	storageMaps     map[interpreter.StorageDomainKey]*interpreter.StorageMap
+
+	// cachedDomainStorageMaps is a cache of domain storage maps.
+	// Key is StorageKey{address, domain} and value is domain storage map.
+	cachedDomainStorageMaps map[interpreter.StorageDomainKey]*interpreter.DomainStorageMap
+
+	// cachedV1Accounts contains the cached result of determining
+	// if the account is in storage format v1 or not.
+	cachedV1Accounts map[common.Address]bool
+
+	// contractUpdates is a cache of contract updates.
+	// Key is StorageKey{contract_address, contract_name} and value is contract composite value.
 	contractUpdates *orderedmap.OrderedMap[interpreter.StorageKey, *interpreter.CompositeValue]
-	Ledger          atree.Ledger
-	memoryGauge     common.MemoryGauge
+
+	Ledger atree.Ledger
+
+	memoryGauge common.MemoryGauge
+
+	Config StorageConfig
+
+	AccountStorageV1      *AccountStorageV1
+	AccountStorageV2      *AccountStorageV2
+	scheduledV2Migrations []common.Address
 }
 
 var _ atree.SlabStorage = &Storage{}
 var _ interpreter.Storage = &Storage{}
 
-func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage {
+func NewPersistentSlabStorage(
+	ledger atree.Ledger,
+	memoryGauge common.MemoryGauge,
+) *atree.PersistentSlabStorage {
 	decodeStorable := func(
 		decoder *cbor.StreamDecoder,
 		slabID atree.SlabID,
@@ -66,99 +102,293 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage {
 	}
 
 	ledgerStorage := atree.NewLedgerBaseStorage(ledger)
-	persistentSlabStorage := atree.NewPersistentSlabStorage(
+
+	return atree.NewPersistentSlabStorage(
 		ledgerStorage,
 		interpreter.CBOREncMode,
 		interpreter.CBORDecMode,
 		decodeStorable,
 		decodeTypeInfo,
 	)
+}
+
+func NewStorage(
+	ledger atree.Ledger,
+	memoryGauge common.MemoryGauge,
+	config StorageConfig,
+) *Storage {
+	persistentSlabStorage := NewPersistentSlabStorage(ledger, memoryGauge)
+
+	accountStorageV1 := NewAccountStorageV1(
+		ledger,
+		persistentSlabStorage,
+		memoryGauge,
+	)
+
+	var accountStorageV2 *AccountStorageV2
+	if config.StorageFormatV2Enabled {
+		accountStorageV2 = NewAccountStorageV2(
+			ledger,
+			persistentSlabStorage,
+			memoryGauge,
+		)
+	}
+
 	return &Storage{
 		Ledger:                ledger,
 		PersistentSlabStorage: persistentSlabStorage,
-		storageMaps:           map[interpreter.StorageDomainKey]*interpreter.StorageMap{},
 		memoryGauge:           memoryGauge,
+		Config:                config,
+		AccountStorageV1:      accountStorageV1,
+		AccountStorageV2:      accountStorageV2,
 	}
 }
 
 const storageIndexLength = 8
 
-func (s *Storage) GetStorageMap(
+// GetDomainStorageMap returns existing or new domain storage map for the given account and domain.
+func (s *Storage) GetDomainStorageMap(
+	inter *interpreter.Interpreter,
 	address common.Address,
 	domain common.StorageDomain,
 	createIfNotExists bool,
 ) (
-	storageMap *interpreter.StorageMap,
+	domainStorageMap *interpreter.DomainStorageMap,
 ) {
-	key := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain)
+	// Get cached domain storage map if it exists.
 
-	storageMap = s.storageMaps[key]
-	if storageMap == nil {
+	domainStorageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain)
 
-		// Load data through the runtime interface
+	if s.cachedDomainStorageMaps != nil {
+		domainStorageMap = s.cachedDomainStorageMaps[domainStorageKey]
+		if domainStorageMap != nil {
+			return domainStorageMap
+		}
+	}
 
-		var data []byte
-		var err error
-		errors.WrapPanic(func() {
-			data, err = s.Ledger.GetValue(
-				key.Address[:],
-				[]byte(key.Domain.Identifier()),
+	defer func() {
+		// Cache domain storage map
+		if domainStorageMap != nil {
+			s.cacheDomainStorageMap(
+				domainStorageKey,
+				domainStorageMap,
 			)
-		})
-		if err != nil {
-			panic(interpreter.WrappedExternalError(err))
 		}
+	}()
 
-		dataLength := len(data)
-		isStorageIndex := dataLength == storageIndexLength
-		if dataLength > 0 && !isStorageIndex {
-			// TODO: add dedicated error type?
-			panic(errors.NewUnexpectedError(
-				"invalid storage index for storage map with domain '%s': expected length %d, got %d",
-				domain.Identifier(), storageIndexLength, dataLength,
-			))
-		}
+	if !s.Config.StorageFormatV2Enabled {
 
-		// Load existing storage or create and store new one
+		// When StorageFormatV2 is disabled, handle all accounts as v1 accounts.
 
-		atreeAddress := atree.Address(address)
+		// Only read requested domain register.
 
-		if isStorageIndex {
-			var slabIndex atree.SlabIndex
-			copy(slabIndex[:], data[:])
-			storageMap = s.loadExistingStorageMap(atreeAddress, slabIndex)
-		} else if createIfNotExists {
-			storageMap = s.StoreNewStorageMap(atreeAddress, domain)
-		}
+		domainStorageMap = s.getDomainStorageMapForV1Account(
+			address,
+			domain,
+			createIfNotExists,
+		)
 
-		if storageMap != nil {
-			s.storageMaps[key] = storageMap
-		}
+		return
+	}
+
+	// StorageFormatV2 is enabled.
+
+	// Check if cached account format is available.
+
+	cachedFormat, known := s.getCachedAccountFormat(address)
+	if known {
+		return s.getDomainStorageMap(
+			cachedFormat,
+			inter,
+			address,
+			domain,
+			createIfNotExists,
+		)
+	}
+
+	// Check if account is v2 (by reading "stored" register).
+
+	if s.isV2Account(address) {
+		return s.getDomainStorageMapForV2Account(
+			inter,
+			address,
+			domain,
+			createIfNotExists,
+		)
+	}
+
+	// Check if account is v1 (by reading requested domain register).
+
+	if s.hasDomainRegister(address, domain) {
+		return s.getDomainStorageMapForV1Account(
+			address,
+			domain,
+			createIfNotExists,
+		)
+	}
+
+	// Domain register doesn't exist.
+
+	// Return early if !createIfNotExists to avoid more register reading.
+
+	if !createIfNotExists {
+		return nil
+	}
+
+	// At this point, account is either new account or v1 account without requested domain register.
+
+	// Check if account is v1 (by reading more domain registers)
+
+	if s.isV1Account(address) {
+		return s.getDomainStorageMapForV1Account(
+			address,
+			domain,
+			createIfNotExists,
+		)
 	}
 
-	return storageMap
+	// New account is treated as v2 account when feature flag is enabled.
+
+	return s.getDomainStorageMapForV2Account(
+		inter,
+		address,
+		domain,
+		createIfNotExists,
+	)
 }
 
-func (s *Storage) loadExistingStorageMap(address atree.Address, slabIndex atree.SlabIndex) *interpreter.StorageMap {
+func (s *Storage) getDomainStorageMapForV1Account(
+	address common.Address,
+	domain common.StorageDomain,
+	createIfNotExists bool,
+) *interpreter.DomainStorageMap {
+	domainStorageMap := s.AccountStorageV1.GetDomainStorageMap(
+		address,
+		domain,
+		createIfNotExists,
+	)
 
-	slabID := atree.NewSlabID(address, slabIndex)
+	s.cacheIsV1Account(address, true)
 
-	return interpreter.NewStorageMapWithRootID(s, slabID)
+	return domainStorageMap
 }
 
-func (s *Storage) StoreNewStorageMap(address atree.Address, domain common.StorageDomain) *interpreter.StorageMap {
-	storageMap := interpreter.NewStorageMap(s.memoryGauge, s, address)
+func (s *Storage) getDomainStorageMapForV2Account(
+	inter *interpreter.Interpreter,
+	address common.Address,
+	domain common.StorageDomain,
+	createIfNotExists bool,
+) *interpreter.DomainStorageMap {
+	domainStorageMap := s.AccountStorageV2.GetDomainStorageMap(
+		inter,
+		address,
+		domain,
+		createIfNotExists,
+	)
+
+	s.cacheIsV1Account(address, false)
 
-	slabIndex := storageMap.SlabID().Index()
+	return domainStorageMap
+}
 
-	storageKey := interpreter.NewStorageDomainKey(s.memoryGauge, common.Address(address), domain)
+func (s *Storage) getDomainStorageMap(
+	format StorageFormat,
+	inter *interpreter.Interpreter,
+	address common.Address,
+	domain common.StorageDomain,
+	createIfNotExists bool,
+) *interpreter.DomainStorageMap {
+	switch format {
+
+	case StorageFormatV1:
+		return s.getDomainStorageMapForV1Account(
+			address,
+			domain,
+			createIfNotExists,
+		)
+
+	case StorageFormatV2:
+		return s.getDomainStorageMapForV2Account(
+			inter,
+			address,
+			domain,
+			createIfNotExists,
+		)
 
-	if s.NewStorageMaps == nil {
-		s.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageDomainKey, atree.SlabIndex]{}
+	default:
+		panic(errors.NewUnreachableError())
 	}
-	s.NewStorageMaps.Set(storageKey, slabIndex)
+}
+
+func (s *Storage) getCachedAccountFormat(address common.Address) (format StorageFormat, known bool) {
+	isV1, cached := s.cachedV1Accounts[address]
+	if !cached {
+		return StorageFormatUnknown, false
+	}
+	if isV1 {
+		return StorageFormatV1, true
+	} else {
+		return StorageFormatV2, true
+	}
+}
 
-	return storageMap
+// isV2Account returns true if given account is in account storage format v2.
+func (s *Storage) isV2Account(address common.Address) bool {
+	accountStorageMapExists, err := hasAccountStorageMap(s.Ledger, address)
+	if err != nil {
+		panic(err)
+	}
+
+	return accountStorageMapExists
+}
+
+// hasDomainRegister returns true if given account has given domain register.
+// NOTE: account storage format v1 has domain registers.
+func (s *Storage) hasDomainRegister(address common.Address, domain common.StorageDomain) (domainExists bool) {
+	_, domainExists, err := readSlabIndexFromRegister(
+		s.Ledger,
+		address,
+		[]byte(domain.Identifier()),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	return domainExists
+}
+
+// isV1Account returns true if given account is in account storage format v1
+// by checking if any of the domain registers exist.
+func (s *Storage) isV1Account(address common.Address) (isV1 bool) {
+
+	// Check if a storage map register exists for any of the domains.
+	// Check the most frequently used domains first, such as storage, public, private.
+	for _, domain := range common.AllStorageDomains {
+		domainExists := s.hasDomainRegister(address, domain)
+		if domainExists {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (s *Storage) cacheIsV1Account(address common.Address, isV1 bool) {
+	if s.cachedV1Accounts == nil {
+		s.cachedV1Accounts = map[common.Address]bool{}
+	}
+	s.cachedV1Accounts[address] = isV1
+}
+
+func (s *Storage) cacheDomainStorageMap(
+	storageDomainKey interpreter.StorageDomainKey,
+	domainStorageMap *interpreter.DomainStorageMap,
+) {
+	if s.cachedDomainStorageMaps == nil {
+		s.cachedDomainStorageMaps = map[interpreter.StorageDomainKey]*interpreter.DomainStorageMap{}
+	}
+
+	s.cachedDomainStorageMaps[storageDomainKey] = domainStorageMap
 }
 
 func (s *Storage) recordContractUpdate(
@@ -217,7 +447,7 @@ func (s *Storage) writeContractUpdate(
 	key interpreter.StorageKey,
 	contractValue *interpreter.CompositeValue,
 ) {
-	storageMap := s.GetStorageMap(key.Address, common.StorageDomainContract, true)
+	storageMap := s.GetDomainStorageMap(inter, key.Address, common.StorageDomainContract, true)
 	// NOTE: pass nil instead of allocating a Value-typed  interface that points to nil
 	storageMapKey := interpreter.StringStorageMapKey(key.Key)
 	if contractValue == nil {
@@ -232,7 +462,7 @@ func (s *Storage) Commit(inter *interpreter.Interpreter, commitContractUpdates b
 	return s.commit(inter, commitContractUpdates, true)
 }
 
-// NondeterministicCommit serializes and commits all values in the deltas storage
+// Deprecated: NondeterministicCommit serializes and commits all values in the deltas storage
 // in nondeterministic order.  This function is used when commit ordering isn't
 // required (e.g. migration programs).
 func (s *Storage) NondeterministicCommit(inter *interpreter.Interpreter, commitContractUpdates bool) error {
@@ -245,54 +475,137 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b
 		s.commitContractUpdates(inter)
 	}
 
-	err := s.commitNewStorageMaps()
+	err := s.AccountStorageV1.commit()
 	if err != nil {
 		return err
 	}
 
+	if s.Config.StorageFormatV2Enabled {
+		err = s.AccountStorageV2.commit()
+		if err != nil {
+			return err
+		}
+
+		err = s.migrateV1AccountsToV2(inter)
+		if err != nil {
+			return err
+		}
+	}
+
 	// Commit the underlying slab storage's writes
 
-	size := s.PersistentSlabStorage.DeltasSizeWithoutTempAddresses()
+	slabStorage := s.PersistentSlabStorage
+
+	size := slabStorage.DeltasSizeWithoutTempAddresses()
 	if size > 0 {
 		inter.ReportComputation(common.ComputationKindEncodeValue, uint(size))
 		usage := common.NewBytesMemoryUsage(int(size))
-		common.UseMemory(s.memoryGauge, usage)
+		common.UseMemory(inter, usage)
 	}
 
-	deltas := s.PersistentSlabStorage.DeltasWithoutTempAddresses()
-	common.UseMemory(s.memoryGauge, common.NewAtreeEncodedSlabMemoryUsage(deltas))
+	deltas := slabStorage.DeltasWithoutTempAddresses()
+	common.UseMemory(inter, common.NewAtreeEncodedSlabMemoryUsage(deltas))
 
 	// TODO: report encoding metric for all encoded slabs
 	if deterministic {
-		return s.PersistentSlabStorage.FastCommit(runtime.NumCPU())
+		return slabStorage.FastCommit(runtime.NumCPU())
 	} else {
-		return s.PersistentSlabStorage.NondeterministicFastCommit(runtime.NumCPU())
+		return slabStorage.NondeterministicFastCommit(runtime.NumCPU())
 	}
 }
 
-func (s *Storage) commitNewStorageMaps() error {
-	if s.NewStorageMaps == nil {
+func (s *Storage) ScheduleV2Migration(address common.Address) bool {
+	if !s.Config.StorageFormatV2Enabled {
+		return false
+	}
+	s.scheduledV2Migrations = append(s.scheduledV2Migrations, address)
+	return true
+}
+
+func (s *Storage) ScheduleV2MigrationForModifiedAccounts() bool {
+	for address, isV1 := range s.cachedV1Accounts { //nolint:maprange
+		if isV1 && s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) {
+			if !s.ScheduleV2Migration(address) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error {
+
+	if !s.Config.StorageFormatV2Enabled {
+		return errors.NewUnexpectedError("cannot migrate to storage format v2, as it is not enabled")
+	}
+
+	if len(s.scheduledV2Migrations) == 0 {
 		return nil
 	}
 
-	for pair := s.NewStorageMaps.Oldest(); pair != nil; pair = pair.Next() {
-		var err error
-		errors.WrapPanic(func() {
-			err = s.Ledger.SetValue(
-				pair.Key.Address[:],
-				[]byte(pair.Key.Domain.Identifier()),
-				pair.Value[:],
-			)
-		})
+	// getDomainStorageMap function returns cached domain storage map if it is available
+	// before loading domain storage map from storage.
+	// This is necessary to migrate uncommitted (new) but cached domain storage map.
+	getDomainStorageMap := func(
+		ledger atree.Ledger,
+		storage atree.SlabStorage,
+		address common.Address,
+		domain common.StorageDomain,
+	) (*interpreter.DomainStorageMap, error) {
+		domainStorageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain)
+
+		// Get cached domain storage map if available.
+		domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey]
+
+		if domainStorageMap != nil {
+			return domainStorageMap, nil
+		}
+
+		return getDomainStorageMapFromV1DomainRegister(ledger, storage, address, domain)
+	}
+
+	migrator := NewDomainRegisterMigration(
+		s.Ledger,
+		s.PersistentSlabStorage,
+		inter,
+		s.memoryGauge,
+		getDomainStorageMap,
+	)
+
+	// Ensure the scheduled accounts are migrated in a deterministic order
+
+	sort.Slice(
+		s.scheduledV2Migrations,
+		func(i, j int) bool {
+			address1 := s.scheduledV2Migrations[i]
+			address2 := s.scheduledV2Migrations[j]
+			return address1.Compare(address2) < 0
+		},
+	)
+
+	for _, address := range s.scheduledV2Migrations {
+
+		accountStorageMap, err := migrator.MigrateAccount(address)
 		if err != nil {
-			return interpreter.WrappedExternalError(err)
+			return err
 		}
+
+		s.AccountStorageV2.cacheAccountStorageMap(
+			address,
+			accountStorageMap,
+		)
+
+		s.cacheIsV1Account(address, false)
 	}
 
+	s.scheduledV2Migrations = nil
+
 	return nil
 }
 
 func (s *Storage) CheckHealth() error {
+
 	// Check slab storage health
 	rootSlabIDs, err := atree.CheckStorageHealth(s, -1)
 	if err != nil {
@@ -312,28 +625,52 @@ func (s *Storage) CheckHealth() error {
 		accountRootSlabIDs[rootSlabID] = struct{}{}
 	}
 
-	// Check that each storage map refers to an existing slab.
-
-	found := map[atree.SlabID]struct{}{}
+	// Check that account storage maps and unmigrated domain storage maps
+	// match returned root slabs from atree.CheckStorageHealth.
 
 	var storageMapStorageIDs []atree.SlabID
 
-	for _, storageMap := range s.storageMaps { //nolint:maprange
+	if s.Config.StorageFormatV2Enabled {
+		// Get cached account storage map slab IDs.
+		storageMapStorageIDs = append(
+			storageMapStorageIDs,
+			s.AccountStorageV2.cachedRootSlabIDs()...,
+		)
+	}
+
+	// Get slab IDs of cached domain storage maps that are in account storage format v1.
+	for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange
+		address := storageKey.Address
+
+		// Only accounts in storage format v1 store domain storage maps
+		// directly at the root of the account
+		if !s.isV1Account(address) {
+			continue
+		}
+
 		storageMapStorageIDs = append(
 			storageMapStorageIDs,
 			storageMap.SlabID(),
 		)
 	}
 
-	sort.Slice(storageMapStorageIDs, func(i, j int) bool {
-		a := storageMapStorageIDs[i]
-		b := storageMapStorageIDs[j]
-		return a.Compare(b) < 0
-	})
+	sort.Slice(
+		storageMapStorageIDs,
+		func(i, j int) bool {
+			a := storageMapStorageIDs[i]
+			b := storageMapStorageIDs[j]
+			return a.Compare(b) < 0
+		},
+	)
+
+	found := map[atree.SlabID]struct{}{}
 
 	for _, storageMapStorageID := range storageMapStorageIDs {
 		if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok {
-			return errors.NewUnexpectedError("account storage map points to non-existing slab %s", storageMapStorageID)
+			return errors.NewUnexpectedError(
+				"account storage map (and unmigrated domain storage map) points to non-root slab %s",
+				storageMapStorageID,
+			)
 		}
 
 		found[storageMapStorageID] = struct{}{}
@@ -371,6 +708,37 @@ func (s *Storage) CheckHealth() error {
 	return nil
 }
 
+// AccountStorageFormat returns either StorageFormatV1 or StorageFormatV2 for existing accounts,
+// and StorageFormatUnknown for non-existing accounts.
+func (s *Storage) AccountStorageFormat(address common.Address) (format StorageFormat) {
+	cachedFormat, known := s.getCachedAccountFormat(address)
+	if known {
+		return cachedFormat
+	}
+
+	defer func() {
+		// Cache account format
+		switch format {
+		case StorageFormatV1:
+			s.cacheIsV1Account(address, true)
+		case StorageFormatV2:
+			s.cacheIsV1Account(address, false)
+		}
+	}()
+
+	if s.Config.StorageFormatV2Enabled {
+		if s.isV2Account(address) {
+			return StorageFormatV2
+		}
+	}
+
+	if s.isV1Account(address) {
+		return StorageFormatV1
+	}
+
+	return StorageFormatUnknown
+}
+
 type UnreferencedRootSlabsError struct {
 	UnreferencedRootSlabIDs []atree.SlabID
 }
diff --git a/runtime/storage_test.go b/runtime/storage_test.go
index aedd492c11..c9efbfdc5f 100644
--- a/runtime/storage_test.go
+++ b/runtime/storage_test.go
@@ -23,7 +23,11 @@ import (
 	"encoding/hex"
 	"fmt"
 	"math/rand"
+	"runtime"
+	"slices"
 	"sort"
+	"strconv"
+	"strings"
 	"testing"
 
 	"github.com/onflow/atree"
@@ -32,7 +36,6 @@ import (
 
 	"github.com/onflow/cadence"
 	"github.com/onflow/cadence/common"
-	"github.com/onflow/cadence/common/orderedmap"
 	"github.com/onflow/cadence/encoding/json"
 	"github.com/onflow/cadence/interpreter"
 	. "github.com/onflow/cadence/runtime"
@@ -49,7 +52,13 @@ func withWritesToStorage(
 	handler func(*Storage, *interpreter.Interpreter),
 ) {
 	ledger := NewTestLedger(nil, onWrite)
-	storage := NewStorage(ledger, nil)
+	storage := NewStorage(
+		ledger,
+		nil,
+		StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
 
 	inter := NewTestInterpreter(tb)
 
@@ -60,18 +69,10 @@ func withWritesToStorage(
 		var address common.Address
 		random.Read(address[:])
 
-		storageKey := interpreter.StorageDomainKey{
-			Address: address,
-			Domain:  common.StorageDomainPathStorage,
-		}
-
 		var slabIndex atree.SlabIndex
 		binary.BigEndian.PutUint32(slabIndex[:], randomIndex)
 
-		if storage.NewStorageMaps == nil {
-			storage.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageDomainKey, atree.SlabIndex]{}
-		}
-		storage.NewStorageMaps.Set(storageKey, slabIndex)
+		storage.AccountStorageV2.SetNewAccountStorageMapSlabIndex(address, slabIndex)
 	}
 
 	handler(storage, inter)
@@ -155,7 +156,9 @@ func TestRuntimeStorageWrite(t *testing.T) {
 
 	t.Parallel()
 
-	runtime := NewTestInterpreterRuntime()
+	config := DefaultTestInterpreterConfig
+	config.StorageFormatV2Enabled = true
+	runtime := NewTestInterpreterRuntimeWithConfig(config)
 
 	address := common.MustBytesToAddress([]byte{0x1})
 
@@ -198,16 +201,22 @@ func TestRuntimeStorageWrite(t *testing.T) {
 
 	assert.Equal(t,
 		[]ownerKeyPair{
-			// storage index to storage domain storage map
+			// storage index to account storage map
 			{
 				[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
-				[]byte("storage"),
+				[]byte(AccountStorageKey),
 			},
 			// storage domain storage map
+			// NOTE: storage domain storage map is empty because it is inlined in account storage map
 			{
 				[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
 				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
 			},
+			// account storage map
+			{
+				[]byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1},
+				[]byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2},
+			},
 		},
 		writes,
 	)
@@ -1610,161 +1619,219 @@ func TestRuntimeResourceOwnerChange(t *testing.T) {
 
 	t.Parallel()
 
-	config := DefaultTestInterpreterConfig
-	config.ResourceOwnerChangeHandlerEnabled = true
-	runtime := NewTestInterpreterRuntimeWithConfig(config)
+	test := func(
+		storageFormatV2Enabled bool,
+		expectedNonEmptyKeys []string,
+	) {
 
-	address1 := common.MustBytesToAddress([]byte{0x1})
-	address2 := common.MustBytesToAddress([]byte{0x2})
+		name := fmt.Sprintf(
+			"storage format V2 enabled: %v",
+			storageFormatV2Enabled,
+		)
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
 
-	ledger := NewTestLedger(nil, nil)
+			config := DefaultTestInterpreterConfig
+			config.ResourceOwnerChangeHandlerEnabled = true
+			config.StorageFormatV2Enabled = storageFormatV2Enabled
+			runtime := NewTestInterpreterRuntimeWithConfig(config)
 
-	var signers []Address
+			address1 := common.MustBytesToAddress([]byte{0x1})
+			address2 := common.MustBytesToAddress([]byte{0x2})
 
-	deployTx := DeploymentTransaction("Test", []byte(`
-      access(all) contract Test {
+			ledger := NewTestLedger(nil, nil)
 
-          access(all) resource R {}
+			var signers []Address
 
-          access(all) fun createR(): @R {
-              return <-create R()
-          }
-      }
-    `))
+			deployTx := DeploymentTransaction("Test", []byte(`
+              access(all) contract Test {
 
-	type resourceOwnerChange struct {
-		uuid       *interpreter.UInt64Value
-		typeID     common.TypeID
-		oldAddress common.Address
-		newAddress common.Address
-	}
+                  access(all) resource R {}
 
-	accountCodes := map[Location][]byte{}
-	var events []cadence.Event
-	var loggedMessages []string
-	var resourceOwnerChanges []resourceOwnerChange
+                  access(all) fun createR(): @R {
+                      return <-create R()
+                  }
+              }
+            `))
 
-	runtimeInterface := &TestRuntimeInterface{
-		Storage: ledger,
-		OnGetSigningAccounts: func() ([]Address, error) {
-			return signers, nil
-		},
-		OnResolveLocation: NewSingleIdentifierLocationResolver(t),
-		OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
-			accountCodes[location] = code
-			return nil
-		},
-		OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
-			code = accountCodes[location]
-			return code, nil
-		},
-		OnEmitEvent: func(event cadence.Event) error {
-			events = append(events, event)
-			return nil
-		},
-		OnProgramLog: func(message string) {
-			loggedMessages = append(loggedMessages, message)
-		},
-		OnResourceOwnerChanged: func(
-			inter *interpreter.Interpreter,
-			resource *interpreter.CompositeValue,
-			oldAddress common.Address,
-			newAddress common.Address,
-		) {
-			resourceOwnerChanges = append(
-				resourceOwnerChanges,
-				resourceOwnerChange{
-					typeID: resource.TypeID(),
-					// TODO: provide proper location range
-					uuid:       resource.ResourceUUID(inter, interpreter.EmptyLocationRange),
-					oldAddress: oldAddress,
-					newAddress: newAddress,
+			type resourceOwnerChange struct {
+				uuid       *interpreter.UInt64Value
+				typeID     common.TypeID
+				oldAddress common.Address
+				newAddress common.Address
+			}
+
+			accountCodes := map[Location][]byte{}
+			var events []cadence.Event
+			var loggedMessages []string
+			var resourceOwnerChanges []resourceOwnerChange
+
+			runtimeInterface := &TestRuntimeInterface{
+				Storage: ledger,
+				OnGetSigningAccounts: func() ([]Address, error) {
+					return signers, nil
 				},
-			)
-		},
-	}
+				OnResolveLocation: NewSingleIdentifierLocationResolver(t),
+				OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error {
+					accountCodes[location] = code
+					return nil
+				},
+				OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) {
+					code = accountCodes[location]
+					return code, nil
+				},
+				OnEmitEvent: func(event cadence.Event) error {
+					events = append(events, event)
+					return nil
+				},
+				OnProgramLog: func(message string) {
+					loggedMessages = append(loggedMessages, message)
+				},
+				OnResourceOwnerChanged: func(
+					inter *interpreter.Interpreter,
+					resource *interpreter.CompositeValue,
+					oldAddress common.Address,
+					newAddress common.Address,
+				) {
+					resourceOwnerChanges = append(
+						resourceOwnerChanges,
+						resourceOwnerChange{
+							typeID: resource.TypeID(),
+							// TODO: provide proper location range
+							uuid:       resource.ResourceUUID(inter, interpreter.EmptyLocationRange),
+							oldAddress: oldAddress,
+							newAddress: newAddress,
+						},
+					)
+				},
+			}
 
-	nextTransactionLocation := NewTransactionLocationGenerator()
+			nextTransactionLocation := NewTransactionLocationGenerator()
 
-	// Deploy contract
+			// Deploy contract
 
-	signers = []Address{address1}
+			signers = []Address{address1}
 
-	err := runtime.ExecuteTransaction(
-		Script{
-			Source: deployTx,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
-		},
-	)
-	require.NoError(t, err)
+			err := runtime.ExecuteTransaction(
+				Script{
+					Source: deployTx,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
 
-	// Store
+			// Store
 
-	signers = []Address{address1}
+			signers = []Address{address1}
 
-	storeTx := []byte(`
-      import Test from 0x1
+			storeTx := []byte(`
+              import Test from 0x1
 
-      transaction {
-          prepare(signer: auth(Storage) &Account) {
-              signer.storage.save(<-Test.createR(), to: /storage/test)
-          }
-      }
-    `)
+              transaction {
+                  prepare(signer: auth(Storage) &Account) {
+                      signer.storage.save(<-Test.createR(), to: /storage/test)
+                  }
+              }
+            `)
 
-	err = runtime.ExecuteTransaction(
-		Script{
-			Source: storeTx,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
-		},
-	)
-	require.NoError(t, err)
+			err = runtime.ExecuteTransaction(
+				Script{
+					Source: storeTx,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
 
-	// Transfer
+			// Transfer
 
-	signers = []Address{address1, address2}
+			signers = []Address{address1, address2}
 
-	transferTx := []byte(`
-      import Test from 0x1
+			transferTx := []byte(`
+              import Test from 0x1
 
-      transaction {
-          prepare(
-              signer1: auth(Storage) &Account,
-              signer2: auth(Storage) &Account
-          ) {
-              let value <- signer1.storage.load<@Test.R>(from: /storage/test)!
-              signer2.storage.save(<-value, to: /storage/test)
-          }
-      }
-    `)
+              transaction {
+                  prepare(
+                      signer1: auth(Storage) &Account,
+                      signer2: auth(Storage) &Account
+                  ) {
+                      let value <- signer1.storage.load<@Test.R>(from: /storage/test)!
+                      signer2.storage.save(<-value, to: /storage/test)
+                  }
+              }
+            `)
 
-	err = runtime.ExecuteTransaction(
-		Script{
-			Source: transferTx,
-		},
-		Context{
-			Interface: runtimeInterface,
-			Location:  nextTransactionLocation(),
-		},
-	)
-	require.NoError(t, err)
+			err = runtime.ExecuteTransaction(
+				Script{
+					Source: transferTx,
+				},
+				Context{
+					Interface: runtimeInterface,
+					Location:  nextTransactionLocation(),
+				},
+			)
+			require.NoError(t, err)
 
-	var nonEmptyKeys []string
-	for key, data := range ledger.StoredValues {
-		if len(data) > 0 {
-			nonEmptyKeys = append(nonEmptyKeys, key)
-		}
-	}
+			var actualNonEmptyKeys []string
+			for key, data := range ledger.StoredValues {
+				if len(data) > 0 {
+					actualNonEmptyKeys = append(actualNonEmptyKeys, key)
+				}
+			}
 
-	sort.Strings(nonEmptyKeys)
+			sort.Strings(actualNonEmptyKeys)
 
-	assert.Equal(t,
+			assert.Equal(t,
+				expectedNonEmptyKeys,
+				actualNonEmptyKeys,
+			)
+
+			expectedUUID := interpreter.NewUnmeteredUInt64Value(1)
+			assert.Equal(t,
+				[]resourceOwnerChange{
+					{
+						typeID: "A.0000000000000001.Test.R",
+						uuid:   &expectedUUID,
+						oldAddress: common.Address{
+							0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+						},
+						newAddress: common.Address{
+							0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1,
+						},
+					},
+					{
+						typeID: "A.0000000000000001.Test.R",
+						uuid:   &expectedUUID,
+						oldAddress: common.Address{
+							0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1,
+						},
+						newAddress: common.Address{
+							0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+						},
+					},
+					{
+						typeID: "A.0000000000000001.Test.R",
+						uuid:   &expectedUUID,
+						oldAddress: common.Address{
+							0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+						},
+						newAddress: common.Address{
+							0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2,
+						},
+					},
+				},
+				resourceOwnerChanges,
+			)
+		})
+	}
+
+	test(
+		false,
 		[]string{
 			// account 0x1:
 			// NOTE: with atree inlining, contract is inlined in contract map
@@ -1780,44 +1847,25 @@ func TestRuntimeResourceOwnerChange(t *testing.T) {
 			"\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02",
 			"\x00\x00\x00\x00\x00\x00\x00\x02|storage",
 		},
-		nonEmptyKeys,
 	)
 
-	expectedUUID := interpreter.NewUnmeteredUInt64Value(1)
-	assert.Equal(t,
-		[]resourceOwnerChange{
-			{
-				typeID: "A.0000000000000001.Test.R",
-				uuid:   &expectedUUID,
-				oldAddress: common.Address{
-					0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-				},
-				newAddress: common.Address{
-					0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1,
-				},
-			},
-			{
-				typeID: "A.0000000000000001.Test.R",
-				uuid:   &expectedUUID,
-				oldAddress: common.Address{
-					0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1,
-				},
-				newAddress: common.Address{
-					0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-				},
-			},
-			{
-				typeID: "A.0000000000000001.Test.R",
-				uuid:   &expectedUUID,
-				oldAddress: common.Address{
-					0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
-				},
-				newAddress: common.Address{
-					0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2,
-				},
-			},
+	test(
+		true,
+		[]string{
+			// account 0x1:
+			// NOTE: with account storage map and atree inlining,
+			//   both storage domain storage map (with inlined storage data)
+			//   and contract domain storage map (with inlined contract data)
+			//   are inlined in account storage map.
+			"\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x02",
+			"\x00\x00\x00\x00\x00\x00\x00\x01|stored",
+			// account 0x2:
+			// NOTE: with account storage map and atree inlining,
+			//   storage domain storage map (with inlined resource)
+			//   is inlined in account storage map.
+			"\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02",
+			"\x00\x00\x00\x00\x00\x00\x00\x02|stored",
 		},
-		resourceOwnerChanges,
 	)
 }
 
@@ -3101,7 +3149,7 @@ func TestRuntimeStorageInternalAccess(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	storageMap := storage.GetStorageMap(address, common.PathDomainStorage.StorageDomain(), false)
+	storageMap := storage.GetDomainStorageMap(inter, address, common.PathDomainStorage.StorageDomain(), false)
 	require.NotNil(t, storageMap)
 
 	// Read first
@@ -6230,3 +6278,3084 @@ func TestRuntimeStorageReferenceAccess(t *testing.T) {
 		require.ErrorAs(t, err, &interpreter.DereferenceError{})
 	})
 }
+
+type (
+	domainStorageMapValues  map[interpreter.StorageMapKey]interpreter.Value
+	accountStorageMapValues map[common.StorageDomain]domainStorageMapValues
+)
+
+func TestRuntimeStorageForNewAccount(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	// This test reads non-existent domain storage map and commit changes.
+	// pre-condition: empty storage
+	// post-condition: empty storage
+	// migration: no migration
+	t.Run("read non-existent domain storage map", func(t *testing.T) {
+
+		var writeCount int
+
+		// Create empty storage
+		ledger := NewTestLedger(nil, LedgerOnWriteCounter(&writeCount))
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		domain := common.PathDomainStorage.StorageDomain()
+
+		// Get non-existent domain storage map
+		const createIfNotExists = false
+		domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+		require.Nil(t, domainStorageMap)
+
+		// Commit changes
+		const commitContractUpdates = false
+		err := storage.Commit(inter, commitContractUpdates)
+		require.NoError(t, err)
+
+		// Check storage health
+		err = storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Check number of writes to underlying storage
+		require.Equal(t, 0, writeCount)
+	})
+
+	// This test creates and writes to new domain storage map and commit changes.
+	// pre-condition: empty storage
+	// post-condition: storage containing
+	//  - account register
+	//  - account storage map
+	//  - zero or more non-inlined domain storage map
+	// migration: no migration for new account.
+	createDomainTestCases := []struct {
+		name                  string
+		newDomains            []common.StorageDomain
+		domainStorageMapCount int
+		inlined               bool
+	}{
+		{name: "empty domain storage map", newDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, domainStorageMapCount: 0, inlined: true},
+		{name: "small domain storage map", newDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, domainStorageMapCount: 10, inlined: true},
+		{name: "large domain storage map", newDomains: []common.StorageDomain{common.PathDomainStorage.StorageDomain()}, domainStorageMapCount: 20, inlined: false},
+	}
+
+	for _, tc := range createDomainTestCases {
+		t.Run("create "+tc.name, func(t *testing.T) {
+
+			var writeEntries []OwnerKeyValue
+
+			// Create empty storage
+			ledger := NewTestLedger(nil, LedgerOnWriteEntries(&writeEntries))
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			random := rand.New(rand.NewSource(42))
+
+			accountValues := make(accountStorageMapValues)
+
+			// Create and write to domain storage map (createIfNotExists is true)
+			for _, domain := range tc.newDomains {
+				// Create new domain storage map
+				const createIfNotExists = true
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+
+				// Write to domain storage map
+				accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.domainStorageMapCount, random)
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 2+len(tc.newDomains), len(writeEntries))
+
+			// writes[0]: account register
+			require.Equal(t, address[:], writeEntries[0].Owner)
+			require.Equal(t, []byte(AccountStorageKey), writeEntries[0].Key)
+			require.Equal(t, []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Value)
+
+			// writes[1]: account storage map
+			require.Equal(t, address[:], writeEntries[1].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[1].Key)
+			require.True(t, len(writeEntries[1].Value) > 0)
+
+			for i := range len(tc.newDomains) {
+				// writes[2+i]: domain storage map
+
+				writeEntryIndex := 2 + i
+				owner := writeEntries[writeEntryIndex].Owner
+				key := writeEntries[writeEntryIndex].Key
+				value := writeEntries[writeEntryIndex].Value
+
+				var slabKey [9]byte
+				slabKey[0] = '$'
+				binary.BigEndian.PutUint64(slabKey[1:], uint64(2+i))
+
+				require.Equal(t, address[:], owner)
+				require.Equal(t, slabKey[:], key)
+
+				// Domain storage map value is empty if it is inlined in account storage map
+				if tc.inlined {
+					require.True(t, len(value) == 0)
+				} else {
+					require.True(t, len(value) > 0)
+				}
+			}
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		})
+	}
+
+	// This test tests storage map operations with intermittent Commit():
+	// - create domain storage map and commit
+	// - write to domain storage map and commit
+	// - remove all elements from domain storage map and commit
+	// - read domain storage map and commit
+	t.Run("create, commit, write, commit, remove, commit", func(t *testing.T) {
+		// Create empty storage
+		ledger := NewTestLedger(nil, nil)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		accountValues := make(accountStorageMapValues)
+
+		domains := []common.StorageDomain{
+			common.PathDomainStorage.StorageDomain(),
+			common.PathDomainPublic.StorageDomain(),
+		}
+
+		// Create empty domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = true
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+
+				accountValues[domain] = make(domainStorageMapValues)
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Write to existing domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+
+				// Write to domain storage map
+				const domainStorageMapCount = 2
+				accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random)
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Remove all elements from existing domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				expectedDomainValues := accountValues[domain]
+				require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count())
+
+				// Remove elements from domain storage map
+				for k := range expectedDomainValues {
+					existed := domainStorageMap.WriteValue(inter, k, nil)
+					require.True(t, existed)
+
+					delete(expectedDomainValues, k)
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Read domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+	})
+}
+
+func TestRuntimeStorageForMigratedAccount(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	// newTestLedgerWithMigratedAccount creates a new TestLedger containing
+	// account storage map with given domains for given address.
+	newTestLedgerWithMigratedAccount := func(
+		onRead LedgerOnRead,
+		onWrite LedgerOnWrite,
+		address common.Address,
+		domains []common.StorageDomain,
+		domainStorageMapCount int,
+	) (TestLedger, accountStorageMapValues) {
+		ledger := NewTestLedger(nil, nil)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		accountValues := createAndWriteAccountStorageMap(t, storage, inter, address, domains, domainStorageMapCount, random)
+
+		newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices)
+
+		return newLedger, accountValues
+	}
+
+	// This test reads non-existent domain storage map and commit changes.
+	// pre-condition: storage contains account register and account storage map
+	// post-condition: no change
+	// migration: none
+	t.Run("read non-existent domain storage map", func(t *testing.T) {
+		existingDomains := []common.StorageDomain{
+			common.PathDomainStorage.StorageDomain(),
+		}
+
+		nonexistentDomain := common.PathDomainPublic.StorageDomain()
+
+		var writeCount int
+
+		// Create storage with account storage map
+		const domainStorageMapCount = 5
+		ledger, _ := newTestLedgerWithMigratedAccount(
+			nil,
+			LedgerOnWriteCounter(&writeCount),
+			address,
+			existingDomains,
+			domainStorageMapCount)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		// Get non-existent domain storage map
+		const createIfNotExists = false
+		domainStorageMap := storage.GetDomainStorageMap(inter, address, nonexistentDomain, createIfNotExists)
+		require.Nil(t, domainStorageMap)
+
+		// Commit changes
+		const commitContractUpdates = false
+		err := storage.Commit(inter, commitContractUpdates)
+		require.NoError(t, err)
+
+		// Check writes to underlying storage
+		require.Equal(t, 0, writeCount)
+	})
+
+	// This test reads existing domain storage map and commit changes.
+	// pre-condition: storage contains account register and account storage map
+	// post-condition: no change
+	// migration: none
+	readExistingDomainTestCases := []struct {
+		name              string
+		createIfNotExists bool
+	}{
+		{name: "(createIfNotExists is true)", createIfNotExists: true},
+		{name: "(createIfNotExists is false)", createIfNotExists: false},
+	}
+
+	for _, tc := range readExistingDomainTestCases {
+		t.Run("read existing domain storage map "+tc.name, func(t *testing.T) {
+
+			existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+
+			var writeCount int
+
+			// Create storage with account storage map
+			const domainStorageMapCount = 5
+			ledger, accountValues := newTestLedgerWithMigratedAccount(
+				nil,
+				LedgerOnWriteCounter(&writeCount),
+				address,
+				existingDomains,
+				domainStorageMapCount,
+			)
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			// Read existing domain storage map
+			for domain, domainValues := range accountValues {
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, tc.createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				for k, expectedV := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev, ok := v.(interpreter.EquatableValue)
+					require.True(t, ok)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 0, writeCount)
+		})
+	}
+
+	// This test creates and writes to new domain storage map and commit changes.
+	// pre-condition: storage contains account register and account storage map
+	// post-condition: storage contains
+	//  - account register
+	//  - account storage map with new domain storage map.
+	createDomainTestCases := []struct {
+		name                          string
+		existingDomains               []common.StorageDomain
+		newDomains                    []common.StorageDomain
+		existingDomainStorageMapCount int
+		newDomainStorageMapCount      int
+		isNewDomainStorageMapInlined  bool
+	}{
+		{
+			name:                          "empty domain storage map",
+			existingDomains:               []common.StorageDomain{common.PathDomainStorage.StorageDomain()},
+			existingDomainStorageMapCount: 5,
+			newDomains:                    []common.StorageDomain{common.PathDomainPublic.StorageDomain()},
+			newDomainStorageMapCount:      0,
+			isNewDomainStorageMapInlined:  true,
+		},
+		{
+			name:                          "small domain storage map",
+			existingDomains:               []common.StorageDomain{common.PathDomainStorage.StorageDomain()},
+			existingDomainStorageMapCount: 5,
+			newDomains:                    []common.StorageDomain{common.PathDomainPublic.StorageDomain()},
+			newDomainStorageMapCount:      10,
+			isNewDomainStorageMapInlined:  true,
+		},
+		{
+			name:                          "large domain storage map",
+			existingDomains:               []common.StorageDomain{common.PathDomainStorage.StorageDomain()},
+			existingDomainStorageMapCount: 5,
+			newDomains:                    []common.StorageDomain{common.PathDomainPublic.StorageDomain()},
+			newDomainStorageMapCount:      20,
+			isNewDomainStorageMapInlined:  false,
+		},
+	}
+
+	for _, tc := range createDomainTestCases {
+		t.Run("create and write "+tc.name, func(t *testing.T) {
+
+			var writeEntries []OwnerKeyValue
+
+			// Create storage with existing account storage map
+			ledger, accountValues := newTestLedgerWithMigratedAccount(
+				nil,
+				LedgerOnWriteEntries(&writeEntries),
+				address,
+				tc.existingDomains,
+				tc.existingDomainStorageMapCount,
+			)
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			lastIndex := ledger.StorageIndices[string(address[:])]
+
+			random := rand.New(rand.NewSource(42))
+
+			// Create and write to domain storage map (createIfNotExists is true)
+			for _, domain := range tc.newDomains {
+				const createIfNotExists = true
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+
+				// Write elements to domain storage map
+				accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random)
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 1+len(tc.newDomains), len(writeEntries))
+
+			// writes[0]: account storage map
+			// account storage map is updated to include new domains.
+			require.Equal(t, address[:], writeEntries[0].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key)
+			require.True(t, len(writeEntries[0].Value) > 0)
+
+			for i := range len(tc.newDomains) {
+				// writes[1+i]: domain storage map
+				// domain storage map value is empty if it is inlined in account storage map
+
+				writeEntryIndex := 1 + i
+				owner := writeEntries[writeEntryIndex].Owner
+				key := writeEntries[writeEntryIndex].Key
+				value := writeEntries[writeEntryIndex].Value
+
+				var slabKey [9]byte
+				slabKey[0] = '$'
+				binary.BigEndian.PutUint64(slabKey[1:], lastIndex+1+uint64(i))
+
+				require.Equal(t, address[:], owner)
+				require.Equal(t, slabKey[:], key)
+
+				if tc.isNewDomainStorageMapInlined {
+					require.True(t, len(value) == 0)
+				} else {
+					require.True(t, len(value) > 0)
+				}
+			}
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		})
+	}
+
+	// This test reads and writes to existing domain storage map and commit changes.
+	// pre-condition: storage contains account register and account storage map
+	// post-condition: storage contains
+	//  - account register
+	//  - account storage map with updated domain storage map.
+	t.Run("read and write to existing domain storage map", func(t *testing.T) {
+
+		var writeEntries []OwnerKeyValue
+
+		existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+		const existingDomainStorageMapCount = 5
+
+		// Create storage with account storage map
+		ledger, accountValues := newTestLedgerWithMigratedAccount(
+			nil,
+			LedgerOnWriteEntries(&writeEntries),
+			address,
+			existingDomains,
+			existingDomainStorageMapCount,
+		)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		// Write to existing domain storage map (createIfNotExists is false)
+		for _, domain := range existingDomains {
+			const createIfNotExists = false
+			domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+			require.NotNil(t, domainStorageMap)
+
+			domainValues := accountValues[domain]
+
+			require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+			domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+			for k := range domainValues { //nolint:maprange
+				domainKeys = append(domainKeys, k)
+			}
+
+			// Update or remove existing elements
+			for i, k := range domainKeys {
+				if i%2 == 0 {
+					n := random.Int()
+					newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+					// Update existing element
+					existed := domainStorageMap.WriteValue(inter, k, newValue)
+					require.True(t, existed)
+
+					domainValues[k] = newValue
+				} else {
+					// Remove existing element
+					existed := domainStorageMap.WriteValue(inter, k, nil)
+					require.True(t, existed)
+
+					delete(domainValues, k)
+				}
+			}
+
+			// Write new elements
+			const newElementCount = 2
+			newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random)
+
+			for k, v := range newDomainValues {
+				domainValues[k] = v
+			}
+		}
+
+		// Commit changes
+		const commitContractUpdates = false
+		err := storage.Commit(inter, commitContractUpdates)
+		require.NoError(t, err)
+
+		// Check storage health after commit
+		err = storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Check writes to underlying storage
+		require.Equal(t, 1, len(writeEntries))
+
+		// writes[0]: account storage map
+		// account storage map is updated because inlined domain storage map is updated.
+		require.Equal(t, address[:], writeEntries[0].Owner)
+		require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key)
+		require.True(t, len(writeEntries[0].Value) > 0)
+
+		// Verify account storage map data
+		checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+	})
+
+	// This test tests storage map operations with intermittent Commit():
+	// - read domain storage map and commit
+	// - write to domain storage map and commit
+	// - remove all elements from domain storage map and commit
+	// - read domain storage map and commit
+	t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) {
+
+		domains := []common.StorageDomain{
+			common.PathDomainStorage.StorageDomain(),
+			common.PathDomainPublic.StorageDomain(),
+		}
+		const domainStorageMapCount = 5
+
+		// Create storage with existing account storage map
+		ledger, accountValues := newTestLedgerWithMigratedAccount(
+			nil,
+			nil,
+			address,
+			domains,
+			domainStorageMapCount,
+		)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		// Read domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				for k, expectedValue := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev := v.(interpreter.EquatableValue)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Write to existing domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				// Write to domain storage map
+				const domainStorageMapCount = 2
+				newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random)
+				for k, v := range newDomainValues {
+					domainValues[k] = v
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Remove all elements from existing domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				expectedDomainValues := accountValues[domain]
+				require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count())
+
+				// Remove elements from domain storage map
+				for k := range expectedDomainValues {
+					existed := domainStorageMap.WriteValue(inter, k, nil)
+					require.True(t, existed)
+
+					delete(expectedDomainValues, k)
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Read domain storage map
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+	})
+}
+
+func TestRuntimeStorageForUnmigratedAccount(t *testing.T) {
+
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	newTestLedgerWithUnmigratedAccount := func(
+		onRead LedgerOnRead,
+		onWrite LedgerOnWrite,
+		address common.Address,
+		domains []common.StorageDomain,
+		domainStorageMapCount int,
+	) (TestLedger, accountStorageMapValues) {
+		ledger := NewTestLedger(nil, nil)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: false,
+			},
+		)
+
+		inter := NewTestInterpreter(t)
+
+		accountValues := make(accountStorageMapValues)
+
+		random := rand.New(rand.NewSource(42))
+
+		for _, domain := range domains {
+			accountValues[domain] = make(domainStorageMapValues)
+
+			// Create domain storage map
+			domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+
+			// Write domain register
+			domainStorageMapValueID := domainStorageMap.ValueID()
+			err := ledger.SetValue(address[:], []byte(domain.Identifier()), domainStorageMapValueID[8:])
+			require.NoError(t, err)
+
+			// Write elements to domain storage map
+			for len(accountValues[domain]) < domainStorageMapCount {
+				n := random.Int()
+				key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+				value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+				_ = domainStorageMap.WriteValue(inter, key, value)
+
+				accountValues[domain][key] = value
+			}
+		}
+
+		// Commit changes
+		const commitContractUpdates = false
+		err := storage.Commit(inter, commitContractUpdates)
+		require.NoError(t, err)
+
+		// Create a new storage
+		newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices)
+
+		return newLedger, accountValues
+	}
+
+	// This test reads non-existent domain storage map and commit changes.
+	// pre-condition: storage contains domain register and domain storage map
+	// post-condition: no change
+	// migration: none because only read ops.
+	t.Run("read non-existent domain storage map", func(t *testing.T) {
+		existingDomains := []common.StorageDomain{
+			common.PathDomainStorage.StorageDomain(),
+		}
+
+		var writeCount int
+
+		// Create storage with unmigrated accounts
+		const domainStorageMapCount = 5
+		ledger, _ := newTestLedgerWithUnmigratedAccount(
+			nil,
+			LedgerOnWriteCounter(&writeCount),
+			address,
+			existingDomains,
+			domainStorageMapCount)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		// Get non-existent domain storage map
+		const createIfNotExists = false
+		nonExistingDomain := common.PathDomainPublic.StorageDomain()
+		domainStorageMap := storage.GetDomainStorageMap(inter, address, nonExistingDomain, createIfNotExists)
+		require.Nil(t, domainStorageMap)
+
+		storage.ScheduleV2MigrationForModifiedAccounts()
+
+		// Commit changes
+		const commitContractUpdates = false
+		err := storage.Commit(inter, commitContractUpdates)
+		require.NoError(t, err)
+
+		// Check there are no writes to underlying storage
+		require.Equal(t, 0, writeCount)
+	})
+
+	// This test reads existing domain storage map and commit changes.
+	// pre-condition: storage contains domain register and domain storage map
+	// post-condition: no change
+	// migration: none because only read ops
+	readExistingDomainTestCases := []struct {
+		name              string
+		createIfNotExists bool
+	}{
+		{name: "(createIfNotExists is true)", createIfNotExists: true},
+		{name: "(createIfNotExists is false)", createIfNotExists: false},
+	}
+
+	for _, tc := range readExistingDomainTestCases {
+		t.Run("read existing domain storage map "+tc.name, func(t *testing.T) {
+
+			var writeCount int
+
+			existingDomains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+			const existingDomainStorageMapCount = 5
+
+			// Create storage with existing domain storage map
+			ledger, accountValues := newTestLedgerWithUnmigratedAccount(
+				nil,
+				LedgerOnWriteCounter(&writeCount),
+				address,
+				existingDomains,
+				existingDomainStorageMapCount,
+			)
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			// Read existing domain storage map.
+			// The domain storage map must exist regardless of createIfNotExists.
+			for domain, domainValues := range accountValues {
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, tc.createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				// Read elements from domain storage map and compare against expected values
+				for k, expectedV := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev, ok := v.(interpreter.EquatableValue)
+					require.True(t, ok)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage.
+			// No writes are expected because only read operations were performed.
+			require.Equal(t, 0, writeCount)
+		})
+	}
+
+	// This test creates and writes to new domain storage map and commit changes.
+	// pre-condition: storage contains
+	// - domain register
+	// - domain storage map
+	// post-condition: storage contains
+	// - account register
+	// - account storage map with existing and new domain storage map.
+	// migration: yes
+	createDomainTestCases := []struct {
+		name                          string
+		existingDomains               []common.StorageDomain
+		newDomains                    []common.StorageDomain
+		existingDomainStorageMapCount int
+		newDomainStorageMapCount      int
+		isNewDomainStorageMapInlined  bool
+	}{
+		{
+			name:                          "empty domain storage map",
+			existingDomains:               []common.StorageDomain{common.PathDomainStorage.StorageDomain()},
+			existingDomainStorageMapCount: 5,
+			newDomains:                    []common.StorageDomain{common.PathDomainPublic.StorageDomain()},
+			newDomainStorageMapCount:      0,
+			isNewDomainStorageMapInlined:  true,
+		},
+		{
+			name:                          "small domain storage map",
+			existingDomains:               []common.StorageDomain{common.PathDomainStorage.StorageDomain()},
+			existingDomainStorageMapCount: 5,
+			newDomains:                    []common.StorageDomain{common.PathDomainPublic.StorageDomain()},
+			newDomainStorageMapCount:      10,
+			isNewDomainStorageMapInlined:  true,
+		},
+		{
+			name:                          "large domain storage map",
+			existingDomains:               []common.StorageDomain{common.PathDomainStorage.StorageDomain()},
+			existingDomainStorageMapCount: 5,
+			newDomains:                    []common.StorageDomain{common.PathDomainPublic.StorageDomain()},
+			newDomainStorageMapCount:      20,
+			isNewDomainStorageMapInlined:  false,
+		},
+	}
+
+	for _, tc := range createDomainTestCases {
+		t.Run("create and write "+tc.name, func(t *testing.T) {
+
+			// Create storage with existing domain storage map (unmigrated, V1 format)
+			var writeEntries []OwnerKeyValue
+
+			ledger, accountValues := newTestLedgerWithUnmigratedAccount(
+				nil,
+				LedgerOnWriteEntries(&writeEntries),
+				address,
+				tc.existingDomains,
+				tc.existingDomainStorageMapCount,
+			)
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			random := rand.New(rand.NewSource(42))
+
+			// Create and write to new domain storage map
+			for _, domain := range tc.newDomains {
+				const createIfNotExists = true
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+				require.Equal(t, uint64(0), domainStorageMap.Count())
+
+				// Write elements to domain storage map
+				accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random)
+			}
+
+			// TODO(confirm): migration currently needs to be scheduled explicitly
+			// after modifications; verify whether this should become automatic.
+			storage.ScheduleV2MigrationForModifiedAccounts()
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			// writes include:
+			// - empty registers for all existing and new domains
+			// - 1 account register
+			// - 1 account storage map register
+			// - other non-inlined domain storage map
+			require.True(t, len(writeEntries) > 1+len(tc.existingDomains)+len(tc.newDomains))
+
+			i := 0
+
+			// Check new domain register committed in V1 format.
+			for _, domain := range common.AllStorageDomains {
+
+				if slices.Contains(tc.newDomains, domain) {
+
+					// New domains are committed in V1 format (with domain register).
+					require.Equal(t, address[:], writeEntries[i].Owner)
+					require.Equal(t, []byte(domain.Identifier()), writeEntries[i].Key)
+					require.True(t, len(writeEntries[i].Value) > 0)
+
+					i++
+				}
+			}
+
+			// Check modified registers in migration.
+			for _, domain := range common.AllStorageDomains {
+
+				if slices.Contains(tc.existingDomains, domain) ||
+					slices.Contains(tc.newDomains, domain) {
+
+					// Existing and new domain registers are removed (migrated).
+					// Removing new (non-existent) domain registers is no-op.
+					require.Equal(t, address[:], writeEntries[i].Owner)
+					require.Equal(t, []byte(domain.Identifier()), writeEntries[i].Key)
+					require.True(t, len(writeEntries[i].Value) == 0)
+
+					i++
+				}
+			}
+
+			// Account register is created
+			require.Equal(t, address[:], writeEntries[i].Owner)
+			require.Equal(t, []byte(AccountStorageKey), writeEntries[i].Key)
+			require.True(t, len(writeEntries[i].Value) > 0)
+
+			i++
+
+			// Remaining writes are atree slabs (either empty for migrated domain storage map or non-empty for account storage map)
+			for ; i < len(writeEntries); i++ {
+				require.Equal(t, address[:], writeEntries[i].Owner)
+				require.Equal(t, byte('$'), writeEntries[i].Key[0])
+			}
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		})
+	}
+
+	// This test reads and writes to existing domain storage map and commit changes.
+	// pre-condition: storage contains
+	// - domain register
+	// - domain storage map
+	// post-condition: storage contains
+	// - account register
+	// - account storage map with updated domain storage map.
+	// migration: yes
+	t.Run("read and write to existing domain storage map", func(t *testing.T) {
+
+		var writeEntries []OwnerKeyValue
+
+		domains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+		const existingDomainStorageMapCount = 5
+
+		// Create storage with existing domain storage maps
+		ledger, accountValues := newTestLedgerWithUnmigratedAccount(
+			nil,
+			LedgerOnWriteEntries(&writeEntries),
+			address,
+			domains,
+			existingDomainStorageMapCount,
+		)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		// write to existing domain storage map (createIfNotExists is false)
+		for _, domain := range domains {
+			const createIfNotExists = false
+			domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+			require.NotNil(t, domainStorageMap)
+
+			domainValues := accountValues[domain]
+			require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+			// Collect keys first, because the map is mutated while iterating below.
+			domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+			for k := range domainValues { //nolint:maprange
+				domainKeys = append(domainKeys, k)
+			}
+
+			// Update or remove elements (alternating, to exercise both write paths)
+			for i, k := range domainKeys {
+				if i%2 == 0 {
+					n := random.Int()
+					newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+					// Update existing element
+					existed := domainStorageMap.WriteValue(inter, k, newValue)
+					require.True(t, existed)
+
+					domainValues[k] = newValue
+				} else {
+					// Remove existing element
+					existed := domainStorageMap.WriteValue(inter, k, nil)
+					require.True(t, existed)
+
+					delete(domainValues, k)
+				}
+			}
+
+			// Write new elements
+			const newElementCount = 2
+			newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random)
+
+			for k, v := range newDomainValues {
+				domainValues[k] = v
+			}
+		}
+
+		// TODO(confirm): migration currently needs to be scheduled explicitly
+		// after modifications; verify whether this should become automatic.
+		storage.ScheduleV2MigrationForModifiedAccounts()
+
+		// Commit changes
+		const commitContractUpdates = false
+		err := storage.Commit(inter, commitContractUpdates)
+		require.NoError(t, err)
+
+		// Check storage health after commit
+		err = storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Check writes to underlying storage
+		require.Equal(t, 4, len(writeEntries))
+
+		// writes[0]: domain register
+		// storage domain register is removed
+		require.Equal(t, address[:], writeEntries[0].Owner)
+		require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key)
+		require.True(t, len(writeEntries[0].Value) == 0)
+
+		// writes[1]: account register
+		// account register is created
+		require.Equal(t, address[:], writeEntries[1].Owner)
+		require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key)
+		require.True(t, len(writeEntries[1].Value) > 0)
+
+		// writes[2]: storage domain storage map
+		// storage domain storage map is removed because it is inlined in account storage map.
+		require.Equal(t, address[:], writeEntries[2].Owner)
+		require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key)
+		require.True(t, len(writeEntries[2].Value) == 0)
+
+		// writes[3]: account storage map
+		// account storage map is created with inlined domain storage map.
+		require.Equal(t, address[:], writeEntries[3].Owner)
+		require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key)
+		require.True(t, len(writeEntries[3].Value) > 0)
+
+		// Verify account storage map data
+		checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+	})
+
+	// This test exercises storage map operations (including account migration) with intermittent Commit()
+	// - read domain storage map and commit
+	// - write to domain storage map and commit (including account migration)
+	// - remove all elements from domain storage map and commit
+	// - read domain storage map and commit
+	t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) {
+
+		var writeEntries []OwnerKeyValue
+
+		domains := []common.StorageDomain{common.PathDomainStorage.StorageDomain()}
+		const domainStorageMapCount = 5
+
+		// Create storage with existing domain storage map (unmigrated, V1 format)
+		ledger, accountValues := newTestLedgerWithUnmigratedAccount(
+			nil,
+			LedgerOnWriteEntries(&writeEntries),
+			address,
+			domains,
+			domainStorageMapCount,
+		)
+		storage := NewStorage(
+			ledger,
+			nil,
+			StorageConfig{
+				StorageFormatV2Enabled: true,
+			},
+		)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		// Read domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				for k, expectedValue := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev := v.(interpreter.EquatableValue)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+		}
+
+		// Update domain storage map, and commit changes (account is migrated during commit)
+		{
+			// update existing domain storage map (loaded from storage)
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				// Collect keys first, because the map is mutated below.
+				domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+				for k := range domainValues { //nolint:maprange
+					domainKeys = append(domainKeys, k)
+				}
+
+				// Update elements
+				for _, k := range domainKeys {
+					n := random.Int()
+					newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+					// Update existing element
+					existed := domainStorageMap.WriteValue(inter, k, newValue)
+					require.True(t, existed)
+
+					domainValues[k] = newValue
+				}
+			}
+
+			// TODO(confirm): migration currently needs to be scheduled explicitly
+			// after modifications; verify whether this should become automatic.
+			storage.ScheduleV2MigrationForModifiedAccounts()
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 4, len(writeEntries))
+
+			// writes[0]: storage domain register
+			// Storage domain register is removed
+			require.Equal(t, address[:], writeEntries[0].Owner)
+			require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key)
+			require.True(t, len(writeEntries[0].Value) == 0)
+
+			// writes[1]: account register
+			// Account register is created
+			require.Equal(t, address[:], writeEntries[1].Owner)
+			require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key)
+			require.True(t, len(writeEntries[1].Value) > 0)
+
+			// writes[2]: storage domain storage map
+			// storage domain storage map is removed because it is inlined in account storage map.
+			require.Equal(t, address[:], writeEntries[2].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key)
+			require.True(t, len(writeEntries[2].Value) == 0)
+
+			// writes[3]: account storage map
+			// account storage map is created with inlined domain storage map.
+			require.Equal(t, address[:], writeEntries[3].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key)
+			require.True(t, len(writeEntries[3].Value) > 0)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+
+			// Reset recorded writes so the next phase only observes its own writes.
+			writeEntries = nil
+		}
+
+		// Remove all elements from domain storage map, and commit changes
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				// Collect keys first, because the map is mutated below.
+				domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+				for k := range domainValues { //nolint:maprange
+					domainKeys = append(domainKeys, k)
+				}
+
+				// Remove elements
+				for _, k := range domainKeys {
+
+					// Remove existing element (writing nil removes the entry)
+					existed := domainStorageMap.WriteValue(inter, k, nil)
+					require.True(t, existed)
+
+					delete(domainValues, k)
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Check writes to underlying storage
+			require.Equal(t, 1, len(writeEntries))
+
+			// writes[0]: account storage map
+			// account storage map is modified because inlined domain storage map is modified.
+			require.Equal(t, address[:], writeEntries[0].Owner)
+			require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[0].Key)
+			require.True(t, len(writeEntries[0].Value) > 0)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+
+		// Read domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				for k, expectedValue := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev := v.(interpreter.EquatableValue)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+
+			// Verify account storage map data
+			checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+		}
+	})
+}
+
+// TestRuntimeStorageDomainStorageMapInlinedState tests inlined state
+// of domain storage map when large number of elements are inserted,
+// updated, and removed from domain storage map.
+// Initially domain storage map is inlined in account storage map, it
+// becomes un-inlined when a large number of elements are inserted, and then
+// inlined again when all elements are removed.
+func TestRuntimeStorageDomainStorageMapInlinedState(t *testing.T) {
+	random := rand.New(rand.NewSource(42))
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	// Create empty storage
+	ledger := NewTestLedger(nil, nil)
+	storage := NewStorage(
+		ledger,
+		nil,
+		StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
+
+	inter := NewTestInterpreterWithStorage(t, storage)
+
+	domains := []common.StorageDomain{
+		common.PathDomainStorage.StorageDomain(),
+		common.PathDomainPublic.StorageDomain(),
+		common.PathDomainPrivate.StorageDomain(),
+	}
+
+	// Large enough element count to force the domain storage map out of
+	// the inlined state (see the Inlined() assertions below).
+	const domainStorageMapCount = 500
+
+	accountValues := make(accountStorageMapValues)
+
+	for _, domain := range domains {
+
+		// Create domain storage map; a newly created map starts inlined.
+		const createIfNotExists = true
+		domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+		require.NotNil(t, domainStorageMap)
+		require.True(t, domainStorageMap.Inlined())
+
+		// The value ID must remain stable across inlined/un-inlined transitions.
+		valueID := domainStorageMap.ValueID()
+
+		accountValues[domain] = make(domainStorageMapValues)
+
+		domainValues := accountValues[domain]
+
+		// Insert new values to domain storage map
+		for domainStorageMap.Count() < domainStorageMapCount {
+			n := random.Int()
+			key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+			// Skip duplicate random keys to keep the expected count exact.
+			if _, exists := domainValues[key]; exists {
+				continue
+			}
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+			existed := domainStorageMap.WriteValue(inter, key, value)
+			require.False(t, existed)
+
+			domainValues[key] = value
+		}
+
+		// Map becomes un-inlined after the large number of insertions.
+		require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count())
+		require.Equal(t, valueID, domainStorageMap.ValueID())
+		require.False(t, domainStorageMap.Inlined())
+
+		// Check storage health
+		err := storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Overwrite values in domain storage map
+		for key := range domainValues {
+			n := random.Int()
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+			existed := domainStorageMap.WriteValue(inter, key, value)
+			require.True(t, existed)
+
+			domainValues[key] = value
+		}
+
+		// Overwriting does not change count, value ID, or inlined state.
+		require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count())
+		require.Equal(t, valueID, domainStorageMap.ValueID())
+		require.False(t, domainStorageMap.Inlined())
+
+		// Check storage health
+		err = storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Remove all values in domain storage map
+		for key := range domainValues {
+			existed := domainStorageMap.WriteValue(inter, key, nil)
+			require.True(t, existed)
+
+			delete(domainValues, key)
+		}
+
+		// Map becomes inlined again once it is empty.
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+		require.Equal(t, valueID, domainStorageMap.ValueID())
+		require.True(t, domainStorageMap.Inlined())
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(t, err)
+
+	// Check storage health
+	err = storage.CheckHealth()
+	require.NoError(t, err)
+
+	// There should be 2 non-empty registers in ledger after commits:
+	// - account register (key is "stored")
+	// - account storage map (atree slab)
+	nonEmptyRegisters := make(map[string][]byte)
+	for k, v := range ledger.StoredValues {
+		if len(v) > 0 {
+			nonEmptyRegisters[k] = v
+		}
+	}
+	require.Equal(t, 2, len(nonEmptyRegisters))
+
+	// Account register holds the 8-byte slab index of the account storage map.
+	accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey]
+	require.True(t, accountRegisterExists)
+	require.Equal(t, 8, len(accountRegisterValue))
+
+	_, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)]
+	require.True(t, accountStorageMapRegisterExists)
+
+	checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+}
+
+// TestRuntimeStorageLargeDomainValues tests large values
+// in domain storage map.
+func TestRuntimeStorageLargeDomainValues(t *testing.T) {
+	random := rand.New(rand.NewSource(42))
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	// Create empty storage
+	ledger := NewTestLedger(nil, nil)
+	storage := NewStorage(
+		ledger,
+		nil,
+		StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
+
+	inter := NewTestInterpreterWithStorage(t, storage)
+
+	domains := []common.StorageDomain{
+		common.PathDomainStorage.StorageDomain(),
+		common.PathDomainPublic.StorageDomain(),
+		common.PathDomainPrivate.StorageDomain(),
+	}
+
+	// Few elements, but each with a large (1 KB string) value.
+	const domainStorageMapCount = 5
+
+	accountValues := make(accountStorageMapValues)
+
+	for _, domain := range domains {
+
+		// Create domain storage map; a newly created map starts inlined.
+		const createIfNotExists = true
+		domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+		require.NotNil(t, domainStorageMap)
+		require.True(t, domainStorageMap.Inlined())
+
+		// The value ID must remain stable throughout all mutations below.
+		valueID := domainStorageMap.ValueID()
+
+		accountValues[domain] = make(domainStorageMapValues)
+
+		domainValues := accountValues[domain]
+
+		// Insert new values to domain storage map
+		for domainStorageMap.Count() < domainStorageMapCount {
+			n := random.Int()
+			key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+			// Skip duplicate random keys to keep the expected count exact.
+			if _, exists := domainValues[key]; exists {
+				continue
+			}
+			value := interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000))
+
+			existed := domainStorageMap.WriteValue(inter, key, value)
+			require.False(t, existed)
+
+			domainValues[key] = value
+		}
+
+		// NOTE(review): the domain storage map is expected to remain inlined
+		// even with large element values — presumably the large values are
+		// stored in their own slabs; confirm against atree inlining rules.
+		require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count())
+		require.Equal(t, valueID, domainStorageMap.ValueID())
+		require.True(t, domainStorageMap.Inlined())
+
+		// Check storage health
+		err := storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Overwrite values in domain storage map
+		for key := range domainValues {
+			value := interpreter.NewUnmeteredStringValue(strings.Repeat("b", 1_000))
+
+			existed := domainStorageMap.WriteValue(inter, key, value)
+			require.True(t, existed)
+
+			domainValues[key] = value
+		}
+
+		require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count())
+		require.Equal(t, valueID, domainStorageMap.ValueID())
+		require.True(t, domainStorageMap.Inlined())
+
+		// Check storage health
+		err = storage.CheckHealth()
+		require.NoError(t, err)
+
+		// Remove all values in domain storage map
+		for key := range domainValues {
+			existed := domainStorageMap.WriteValue(inter, key, nil)
+			require.True(t, existed)
+
+			delete(domainValues, key)
+		}
+
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+		require.Equal(t, valueID, domainStorageMap.ValueID())
+		require.True(t, domainStorageMap.Inlined())
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(t, err)
+
+	// Check storage health
+	err = storage.CheckHealth()
+	require.NoError(t, err)
+
+	// There should be 2 non-empty registers in ledger after commits:
+	// - account register (key is "stored")
+	// - account storage map (atree slab)
+	nonEmptyRegisters := make(map[string][]byte)
+	for k, v := range ledger.StoredValues {
+		if len(v) > 0 {
+			nonEmptyRegisters[k] = v
+		}
+	}
+	require.Equal(t, 2, len(nonEmptyRegisters))
+
+	// Account register holds the 8-byte slab index of the account storage map.
+	accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey]
+	require.True(t, accountRegisterExists)
+	require.Equal(t, 8, len(accountRegisterValue))
+
+	_, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)]
+	require.True(t, accountStorageMapRegisterExists)
+
+	checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+}
+
+// TestDomainRegisterMigrationForLargeAccount tests migration to storage
+// format V2 of an account with many values (nested up to depth 3) across
+// multiple existing domains, after a new domain storage map is created.
+func TestDomainRegisterMigrationForLargeAccount(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	var writeCount int
+
+	accountsInfo := []accountInfo{
+		{
+			address: address,
+			domains: []domainInfo{
+				{domain: common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 100, maxDepth: 3},
+				{domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 100, maxDepth: 3},
+				{domain: common.PathDomainPrivate.StorageDomain(), domainStorageMapCount: 100, maxDepth: 3},
+			},
+		},
+	}
+
+	ledger, accountsValues := newTestLedgerWithUnmigratedAccounts(
+		t,
+		nil,
+		LedgerOnWriteCounter(&writeCount),
+		accountsInfo,
+	)
+	storage := NewStorage(
+		ledger,
+		nil,
+		StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
+
+	inter := NewTestInterpreterWithStorage(t, storage)
+
+	accountValues := accountsValues[address]
+
+	// Create new domain storage map
+	const createIfNotExists = true
+	domain := common.StorageDomainInbox
+	domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+	require.NotNil(t, domainStorageMap)
+
+	accountValues[domain] = make(domainStorageMapValues)
+
+	// TODO(confirm): migration currently needs to be scheduled explicitly
+	// after modifications; verify whether this should become automatic.
+	storage.ScheduleV2MigrationForModifiedAccounts()
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(t, err)
+
+	// Check there are writes to underlying storage
+	require.True(t, writeCount > 0)
+
+	// Check there aren't any domain registers left after migration.
+	// Collect all non-empty, non-atree ('$'-prefixed) registers.
+	nonAtreeRegisters := make(map[string][]byte)
+	for k, v := range ledger.StoredValues {
+		if len(v) == 0 {
+			continue
+		}
+		ks := strings.Split(k, "|")
+		if ks[1][0] != '$' {
+			nonAtreeRegisters[k] = v
+		}
+	}
+
+	// Only the account register should remain.
+	require.Equal(t, 1, len(nonAtreeRegisters))
+	for k := range nonAtreeRegisters {
+		ks := strings.Split(k, "|")
+		require.Equal(t, address[:], []byte(ks[0]))
+		require.Equal(t, AccountStorageKey, ks[1])
+	}
+
+	// Check storage health after commit
+	err = storage.CheckHealth()
+	require.NoError(t, err)
+
+	// Verify account storage map data
+	checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+}
+
+func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	testCases := []struct {
+		name                                       string
+		storageFormatV2Enabled                     bool
+		domain                                     common.StorageDomain
+		createIfNotExists                          bool
+		expectedDomainStorageMapIsNil              bool
+		expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair
+		expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair
+		expectedReadsSet                           map[string]struct{}
+	}{
+		// Test cases with storageFormatV2Enabled = false
+		{
+			name:                          "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false",
+			storageFormatV2Enabled:        false,
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: true,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true",
+			storageFormatV2Enabled:        false,
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reads from the second GetDomainStorageMap() because
+				// domain storage map is created and cached in the first GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {},
+			},
+		},
+		// Test cases with storageFormatV2Enabled = true
+		{
+			name:                          "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false",
+			storageFormatV2Enabled:        true,
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: true,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// Second GetDomainStorageMap() has the same register reading as the first GetDomainStorageMap()
+				// because account status can't be cached in the previous call.
+
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):          {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true",
+			storageFormatV2Enabled:        true,
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Check all domain registers
+				{
+					owner: address[:],
+					key:   []byte(common.PathDomainStorage.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.PathDomainPrivate.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.PathDomainPublic.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainContract.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainInbox.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainCapabilityController.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainCapabilityControllerTag.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathCapability.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainAccountCapability.Identifier()),
+				},
+				// Read account register to load account storage map
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reads from the second GetDomainStorageMap() because
+				// domain storage map is created and cached in the first GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):                      {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage):             {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate):             {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic):              {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainContract):                {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainInbox):                   {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityController):    {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityControllerTag): {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathCapability):          {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainAccountCapability):       {},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			var ledgerReads []ownerKeyPair
+			ledgerReadsSet := make(map[string]struct{})
+
+			// Create empty storage
+			ledger := NewTestLedger(
+				func(owner, key, _ []byte) {
+					ledgerReads = append(
+						ledgerReads,
+						ownerKeyPair{
+							owner: owner,
+							key:   key,
+						},
+					)
+					ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{}
+				},
+				nil)
+
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: tc.storageFormatV2Enabled,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists)
+			require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil)
+			require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads)
+
+			ledgerReads = ledgerReads[:0]
+
+			// Call GetDomainStorageMap() again to test account status is cached and no register reading is needed.
+
+			domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists)
+			require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil)
+			require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads)
+
+			// Check underlying ledger reads
+			require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet))
+			for k := range ledgerReadsSet {
+				require.Contains(t, tc.expectedReadsSet, k)
+			}
+		})
+	}
+}
+
+func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) {
+
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64)
+
+	createV1AccountWithDomain := func(
+		address common.Address,
+		domain common.StorageDomain,
+	) getStorageDataFunc {
+		return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) {
+			ledger := NewTestLedger(nil, nil)
+
+			persistentSlabStorage := NewPersistentSlabStorage(ledger, nil)
+
+			orderedMap, err := atree.NewMap(
+				persistentSlabStorage,
+				atree.Address(address),
+				atree.NewDefaultDigesterBuilder(),
+				interpreter.EmptyTypeInfo{},
+			)
+			require.NoError(t, err)
+
+			slabIndex := orderedMap.SlabID().Index()
+
+			for i := range 3 {
+
+				key := interpreter.StringStorageMapKey(strconv.Itoa(i))
+
+				value := interpreter.NewUnmeteredIntValueFromInt64(int64(i))
+
+				existingStorable, err := orderedMap.Set(
+					key.AtreeValueCompare,
+					key.AtreeValueHashInput,
+					key.AtreeValue(),
+					value,
+				)
+				require.NoError(t, err)
+				require.Nil(t, existingStorable)
+			}
+
+			// Commit domain storage map
+			err = persistentSlabStorage.FastCommit(runtime.NumCPU())
+			require.NoError(t, err)
+
+			// Create domain register
+			err = ledger.SetValue(address[:], []byte(domain.Identifier()), slabIndex[:])
+			require.NoError(t, err)
+
+			return ledger.StoredValues, ledger.StorageIndices
+		}
+	}
+
+	testCases := []struct {
+		name                                       string
+		getStorageData                             getStorageDataFunc
+		storageFormatV2Enabled                     bool
+		domain                                     common.StorageDomain
+		createIfNotExists                          bool
+		expectedDomainStorageMapIsNil              bool
+		expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair
+		expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair
+		expectedReadsSet                           map[string]struct{}
+	}{
+		// Test cases with storageFormatV2Enabled = false
+		{
+			name:                          "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false",
+			storageFormatV2Enabled:        false,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathPublic),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: true,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true",
+			storageFormatV2Enabled:        false,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathPublic),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading in second GetDomainStorageMap() because
+				// domain storage map is created and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = false",
+			storageFormatV2Enabled:        false,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathStorage),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Read domain storage map register
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading in second GetDomainStorageMap() because
+				// domain storage map is loaded and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage):  {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = true",
+			storageFormatV2Enabled:        false,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathStorage),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Read domain storage map register
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading in second GetDomainStorageMap() because
+				// domain storage map is loaded and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage):  {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+		// Test cases with storageFormatV2Enabled = true
+		{
+			name:                          "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false",
+			storageFormatV2Enabled:        true,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathPublic),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: true,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):          {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true",
+			storageFormatV2Enabled:        true,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathPublic),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Check all domain registers until an existing domain is found
+				{
+					owner: address[:],
+					key:   []byte(common.PathDomainStorage.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.PathDomainPrivate.Identifier()),
+				},
+				{
+					owner: address[:],
+					key:   []byte(common.PathDomainPublic.Identifier()),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading from second GetDomainStorageMap() because
+				// domain storage map is created and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):          {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate): {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic):  {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = false",
+			storageFormatV2Enabled:        true,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathStorage),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Read domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Read domain storage map register
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading from second GetDomainStorageMap() because
+				// domain storage map is loaded and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):           {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage):  {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+		{
+			name:                          "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = true",
+			storageFormatV2Enabled:        true,
+			getStorageData:                createV1AccountWithDomain(address, common.StorageDomainPathStorage),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Check given domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Read given domain register
+				{
+					owner: address[:],
+					key:   []byte(common.StorageDomainPathStorage.Identifier()),
+				},
+				// Read domain storage map register
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading from second GetDomainStorageMap() because
+				// domain storage map is loaded and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):           {},
+				concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage):  {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			storedValues, storedIndices := tc.getStorageData()
+
+			var ledgerReads []ownerKeyPair
+			ledgerReadsSet := make(map[string]struct{})
+
+			ledger := NewTestLedgerWithData(
+				func(owner, key, _ []byte) {
+					ledgerReads = append(
+						ledgerReads,
+						ownerKeyPair{
+							owner: owner,
+							key:   key,
+						},
+					)
+					ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{}
+				},
+				nil,
+				storedValues,
+				storedIndices,
+			)
+
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: tc.storageFormatV2Enabled,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists)
+			require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil)
+			require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads)
+
+			ledgerReads = ledgerReads[:0]
+
+			domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists)
+			require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil)
+			require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads)
+
+			// Check underlying ledger reads
+			require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet))
+			for k := range ledgerReadsSet {
+				require.Contains(t, tc.expectedReadsSet, k)
+			}
+		})
+	}
+}
+
+func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64)
+
+	createV2AccountWithDomain := func(
+		address common.Address,
+		domain common.StorageDomain,
+	) getStorageDataFunc {
+		return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) {
+			ledger := NewTestLedger(nil, nil)
+
+			persistentSlabStorage := NewPersistentSlabStorage(ledger, nil)
+
+			accountOrderedMap, err := atree.NewMap(
+				persistentSlabStorage,
+				atree.Address(address),
+				atree.NewDefaultDigesterBuilder(),
+				interpreter.EmptyTypeInfo{},
+			)
+			require.NoError(t, err)
+
+			slabIndex := accountOrderedMap.SlabID().Index()
+
+			domainOrderedMap, err := atree.NewMap(
+				persistentSlabStorage,
+				atree.Address(address),
+				atree.NewDefaultDigesterBuilder(),
+				interpreter.EmptyTypeInfo{},
+			)
+			require.NoError(t, err)
+
+			domainKey := interpreter.Uint64StorageMapKey(domain)
+
+			existingDomain, err := accountOrderedMap.Set(
+				domainKey.AtreeValueCompare,
+				domainKey.AtreeValueHashInput,
+				domainKey.AtreeValue(),
+				domainOrderedMap,
+			)
+			require.NoError(t, err)
+			require.Nil(t, existingDomain)
+
+			for i := range 3 {
+
+				key := interpreter.StringStorageMapKey(strconv.Itoa(i))
+
+				value := interpreter.NewUnmeteredIntValueFromInt64(int64(i))
+
+				existingStorable, err := domainOrderedMap.Set(
+					key.AtreeValueCompare,
+					key.AtreeValueHashInput,
+					key.AtreeValue(),
+					value,
+				)
+				require.NoError(t, err)
+				require.Nil(t, existingStorable)
+			}
+
+			// Commit domain storage map
+			err = persistentSlabStorage.FastCommit(runtime.NumCPU())
+			require.NoError(t, err)
+
+			// Create account register
+			err = ledger.SetValue(address[:], []byte(AccountStorageKey), slabIndex[:])
+			require.NoError(t, err)
+
+			return ledger.StoredValues, ledger.StorageIndices
+		}
+	}
+
+	testCases := []struct {
+		name                                       string
+		getStorageData                             getStorageDataFunc
+		domain                                     common.StorageDomain
+		createIfNotExists                          bool
+		expectedDomainStorageMapIsNil              bool
+		expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair
+		expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair
+		expectedReadsSet                           map[string]struct{}
+	}{
+		{
+			name:                          "domain storage map does not exist, createIfNotExists = false",
+			getStorageData:                createV2AccountWithDomain(address, common.StorageDomainPathPublic),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: true,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account register
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account storage map
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reads from the second GetDomainStorageMap() because
+				// the account storage map is loaded and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):           {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+		{
+			name:                          "domain storage map does not exist, createIfNotExists = true",
+			getStorageData:                createV2AccountWithDomain(address, common.StorageDomainPathPublic),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account register
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account storage map
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading from second GetDomainStorageMap() because
+				// domain storage map is created and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):           {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+		{
+			name:                          "domain storage map exists, createIfNotExists = false",
+			getStorageData:                createV2AccountWithDomain(address, common.StorageDomainPathStorage),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             false,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account register
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account storage map
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading from second GetDomainStorageMap() because
+				// domain storage map is loaded and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):           {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+		{
+			name:                          "domain storage map exists, createIfNotExists = true",
+			getStorageData:                createV2AccountWithDomain(address, common.StorageDomainPathStorage),
+			domain:                        common.StorageDomainPathStorage,
+			createIfNotExists:             true,
+			expectedDomainStorageMapIsNil: false,
+			expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{
+				// Check if account is v2
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account register
+				{
+					owner: address[:],
+					key:   []byte(AccountStorageKey),
+				},
+				// Read account storage map
+				{
+					owner: address[:],
+					key:   []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1},
+				},
+			},
+			expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{
+				// No register reading from second GetDomainStorageMap() because
+				// domain storage map is loaded and cached in the first
+				// GetDomainStorageMap().
+			},
+			expectedReadsSet: map[string]struct{}{
+				concatRegisterAddressAndKey(address, []byte(AccountStorageKey)):           {},
+				concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {},
+			},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			storedValues, storedIndices := tc.getStorageData()
+
+			var ledgerReads []ownerKeyPair
+			ledgerReadsSet := make(map[string]struct{})
+
+			ledger := NewTestLedgerWithData(
+				func(owner, key, _ []byte) {
+					ledgerReads = append(
+						ledgerReads,
+						ownerKeyPair{
+							owner: owner,
+							key:   key,
+						},
+					)
+					ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{}
+				},
+				nil,
+				storedValues,
+				storedIndices,
+			)
+
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: true,
+				},
+			)
+
+			inter := NewTestInterpreterWithStorage(t, storage)
+
+			domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists)
+			require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil)
+			require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads)
+
+			ledgerReads = ledgerReads[:0]
+
+			domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists)
+			require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil)
+			require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads)
+
+			// Check underlying ledger reads
+			require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet))
+			for k := range ledgerReadsSet {
+				require.Contains(t, tc.expectedReadsSet, k)
+			}
+		})
+	}
+}
+
+func TestAccountStorageFormatForNonExistingAccount(t *testing.T) {
+
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	testCases := []struct {
+		name                   string
+		storageFormatV2Enabled bool
+		format                 StorageFormat
+	}{
+		{
+			name:                   "non-existing account, storageFormatV2Enabled = false",
+			storageFormatV2Enabled: false,
+			format:                 StorageFormatUnknown,
+		},
+		{
+			name:                   "non-existing account, storageFormatV2Enabled = true",
+			storageFormatV2Enabled: true,
+			format:                 StorageFormatUnknown,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ledger := NewTestLedger(nil, nil)
+
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: tc.storageFormatV2Enabled,
+				},
+			)
+
+			for range 2 {
+				format := storage.AccountStorageFormat(address)
+				require.Equal(t, tc.format, format)
+			}
+		})
+	}
+}
+
+func TestAccountStorageFormatForV1Account(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	createV1AccountWithDomain := func(
+		address common.Address,
+		domain common.StorageDomain,
+	) (storedValues map[string][]byte, StorageIndices map[string]uint64) {
+		ledger := NewTestLedger(nil, nil)
+
+		persistentSlabStorage := NewPersistentSlabStorage(ledger, nil)
+
+		orderedMap, err := atree.NewMap(
+			persistentSlabStorage,
+			atree.Address(address),
+			atree.NewDefaultDigesterBuilder(),
+			interpreter.EmptyTypeInfo{},
+		)
+		require.NoError(t, err)
+
+		slabIndex := orderedMap.SlabID().Index()
+
+		for i := range 3 {
+
+			key := interpreter.StringStorageMapKey(strconv.Itoa(i))
+
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(i))
+
+			existingStorable, err := orderedMap.Set(
+				key.AtreeValueCompare,
+				key.AtreeValueHashInput,
+				key.AtreeValue(),
+				value,
+			)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+		}
+
+		// Commit domain storage map
+		err = persistentSlabStorage.FastCommit(runtime.NumCPU())
+		require.NoError(t, err)
+
+		// Create domain register
+		err = ledger.SetValue(address[:], []byte(domain.Identifier()), slabIndex[:])
+		require.NoError(t, err)
+
+		return ledger.StoredValues, ledger.StorageIndices
+	}
+
+	testCases := []struct {
+		name                   string
+		storageFormatV2Enabled bool
+		format                 StorageFormat
+	}{
+		{
+			name:                   "v1 account, storageFormatV2Enabled = false",
+			storageFormatV2Enabled: false,
+			format:                 StorageFormatV1,
+		},
+		{
+			name:                   "v1 account, storageFormatV2Enabled = true",
+			storageFormatV2Enabled: true,
+			format:                 StorageFormatV1,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			storedValues, storedIndices := createV1AccountWithDomain(
+				address,
+				common.StorageDomainPathStorage,
+			)
+
+			ledger := NewTestLedgerWithData(nil, nil, storedValues, storedIndices)
+
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: tc.storageFormatV2Enabled,
+				},
+			)
+
+			for range 2 {
+				format := storage.AccountStorageFormat(address)
+				require.Equal(t, tc.format, format)
+			}
+		})
+	}
+}
+
+func TestAccountStorageFormatForV2Account(t *testing.T) {
+	t.Parallel()
+
+	address := common.MustBytesToAddress([]byte{0x1})
+
+	createV2AccountWithDomain := func(
+		address common.Address,
+		domain common.StorageDomain,
+	) (storedValues map[string][]byte, storageIndices map[string]uint64) {
+		ledger := NewTestLedger(nil, nil)
+
+		persistentSlabStorage := NewPersistentSlabStorage(ledger, nil)
+
+		accountOrderedMap, err := atree.NewMap(
+			persistentSlabStorage,
+			atree.Address(address),
+			atree.NewDefaultDigesterBuilder(),
+			interpreter.EmptyTypeInfo{},
+		)
+		require.NoError(t, err)
+
+		slabIndex := accountOrderedMap.SlabID().Index()
+
+		domainOrderedMap, err := atree.NewMap(
+			persistentSlabStorage,
+			atree.Address(address),
+			atree.NewDefaultDigesterBuilder(),
+			interpreter.EmptyTypeInfo{},
+		)
+		require.NoError(t, err)
+
+		domainKey := interpreter.Uint64StorageMapKey(domain)
+
+		existingDomain, err := accountOrderedMap.Set(
+			domainKey.AtreeValueCompare,
+			domainKey.AtreeValueHashInput,
+			domainKey.AtreeValue(),
+			domainOrderedMap,
+		)
+		require.NoError(t, err)
+		require.Nil(t, existingDomain)
+
+		for i := range 3 {
+
+			key := interpreter.StringStorageMapKey(strconv.Itoa(i))
+
+			value := interpreter.NewUnmeteredIntValueFromInt64(int64(i))
+
+			existingStorable, err := domainOrderedMap.Set(
+				key.AtreeValueCompare,
+				key.AtreeValueHashInput,
+				key.AtreeValue(),
+				value,
+			)
+			require.NoError(t, err)
+			require.Nil(t, existingStorable)
+		}
+
+		// Commit domain storage map
+		err = persistentSlabStorage.FastCommit(runtime.NumCPU())
+		require.NoError(t, err)
+
+		// Create account register
+		err = ledger.SetValue(address[:], []byte(AccountStorageKey), slabIndex[:])
+		require.NoError(t, err)
+
+		return ledger.StoredValues, ledger.StorageIndices
+	}
+
+	testCases := []struct {
+		name                   string
+		storageFormatV2Enabled bool
+		format                 StorageFormat
+	}{
+		{
+			name:                   "v2 account, storageFormatV2Enabled = true",
+			storageFormatV2Enabled: true,
+			format:                 StorageFormatV2,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			storedValues, storedIndices := createV2AccountWithDomain(
+				address,
+				common.StorageDomainPathStorage,
+			)
+
+			ledger := NewTestLedgerWithData(nil, nil, storedValues, storedIndices)
+
+			storage := NewStorage(
+				ledger,
+				nil,
+				StorageConfig{
+					StorageFormatV2Enabled: tc.storageFormatV2Enabled,
+				},
+			)
+
+			for range 2 {
+				format := storage.AccountStorageFormat(address)
+				require.Equal(t, tc.format, format)
+			}
+		})
+	}
+}
+
+// createAndWriteAccountStorageMap creates account storage map with given domains and writes random values to domain storage map.
+func createAndWriteAccountStorageMap(
+	t testing.TB,
+	storage *Storage,
+	inter *interpreter.Interpreter,
+	address common.Address,
+	domains []common.StorageDomain,
+	count int,
+	random *rand.Rand,
+) accountStorageMapValues {
+
+	accountValues := make(accountStorageMapValues)
+
+	// Create domain storage map
+	for _, domain := range domains {
+		const createIfNotExists = true
+		domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists)
+		require.NotNil(t, domainStorageMap)
+		require.Equal(t, uint64(0), domainStorageMap.Count())
+
+		// Write to domain storage map
+		accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, count, random)
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(t, err)
+
+	// Check storage health after commit
+	err = storage.CheckHealth()
+	require.NoError(t, err)
+
+	return accountValues
+}
+
+func writeToDomainStorageMap(
+	inter *interpreter.Interpreter,
+	domainStorageMap *interpreter.DomainStorageMap,
+	count int,
+	random *rand.Rand,
+) domainStorageMapValues {
+	domainValues := make(domainStorageMapValues)
+
+	for len(domainValues) < count {
+		n := random.Int()
+
+		key := interpreter.StringStorageMapKey(strconv.Itoa(n))
+
+		value := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+		domainStorageMap.WriteValue(inter, key, value)
+
+		domainValues[key] = value
+	}
+
+	return domainValues
+}
+
+// checkAccountStorageMapData creates new storage with given storedValues,
+// and compares account storage map values with given expectedAccountValues.
+func checkAccountStorageMapData(
+	tb testing.TB,
+	storedValues map[string][]byte,
+	storageIndices map[string]uint64,
+	address common.Address,
+	expectedAccountValues accountStorageMapValues,
+) {
+	// Create storage with given storedValues and storageIndices
+	ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices)
+	storage := NewStorage(
+		ledger,
+		nil,
+		StorageConfig{
+			StorageFormatV2Enabled: true,
+		},
+	)
+
+	inter := NewTestInterpreterWithStorage(tb, storage)
+
+	// Get account register
+	accountStorageMapSlabIndex, err := ledger.GetValue(address[:], []byte(AccountStorageKey))
+	require.NoError(tb, err)
+	require.Equal(tb, 8, len(accountStorageMapSlabIndex))
+
+	// Load account storage map
+	accountSlabID := atree.NewSlabID(
+		atree.Address(address[:]),
+		atree.SlabIndex(accountStorageMapSlabIndex[:]),
+	)
+	accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountSlabID)
+	require.NotNil(tb, accountStorageMap)
+	require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count())
+
+	domainCount := 0
+	iter := accountStorageMap.Iterator()
+	for {
+		domain, domainStorageMap := iter.Next()
+		if domain == common.StorageDomainUnknown {
+			break
+		}
+
+		domainCount++
+
+		expectedDomainValues, exist := expectedAccountValues[domain]
+		require.True(tb, exist)
+		require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count())
+
+		// Check values stored in domain storage map
+		for key, expectedValue := range expectedDomainValues {
+			value := domainStorageMap.ReadValue(nil, key)
+
+			ev, ok := value.(interpreter.EquatableValue)
+			require.True(tb, ok)
+			require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+		}
+	}
+
+	require.Equal(tb, len(expectedAccountValues), domainCount)
+
+	// Check atree storage health
+	rootSlabIDs, err := atree.CheckStorageHealth(storage.PersistentSlabStorage, 1)
+	require.NoError(tb, err)
+	require.Equal(tb, 1, len(rootSlabIDs))
+	require.Contains(tb, rootSlabIDs, accountSlabID)
+}
+
+func concatRegisterAddressAndKey(
+	address common.Address,
+	key []byte,
+) string {
+	return string(address[:]) + "|" + string(key)
+}
+
+func concatRegisterAddressAndDomain(
+	address common.Address,
+	domain common.StorageDomain,
+) string {
+	return string(address[:]) + "|" + domain.Identifier()
+}
diff --git a/runtime/transaction_executor.go b/runtime/transaction_executor.go
index a8d3f30a90..f071aeb8e2 100644
--- a/runtime/transaction_executor.go
+++ b/runtime/transaction_executor.go
@@ -106,7 +106,13 @@ func (executor *interpreterTransactionExecutor) preprocess() (err error) {
 
 	runtimeInterface := context.Interface
 
-	storage := NewStorage(runtimeInterface, runtimeInterface)
+	storage := NewStorage(
+		runtimeInterface,
+		runtimeInterface,
+		StorageConfig{
+			StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled,
+		},
+	)
 	executor.storage = storage
 
 	environment := context.Environment
diff --git a/stdlib/account.go b/stdlib/account.go
index 201affa202..54bc1a1892 100644
--- a/stdlib/account.go
+++ b/stdlib/account.go
@@ -3242,7 +3242,8 @@ func recordStorageCapabilityController(
 
 	storageMapKey := interpreter.StringStorageMapKey(identifier)
 
-	storageMap := inter.Storage().GetStorageMap(
+	storageMap := inter.Storage().GetDomainStorageMap(
+		inter,
 		address,
 		common.StorageDomainPathCapability,
 		true,
@@ -3284,7 +3285,8 @@ func getPathCapabilityIDSet(
 
 	storageMapKey := interpreter.StringStorageMapKey(identifier)
 
-	storageMap := inter.Storage().GetStorageMap(
+	storageMap := inter.Storage().GetDomainStorageMap(
+		inter,
 		address,
 		common.StorageDomainPathCapability,
 		false,
@@ -3334,7 +3336,8 @@ func unrecordStorageCapabilityController(
 	// Remove capability set if empty
 
 	if capabilityIDSet.Count() == 0 {
-		storageMap := inter.Storage().GetStorageMap(
+		storageMap := inter.Storage().GetDomainStorageMap(
+			inter,
 			address,
 			common.StorageDomainPathCapability,
 			true,
@@ -3402,7 +3405,8 @@ func recordAccountCapabilityController(
 
 	storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue)
 
-	storageMap := inter.Storage().GetStorageMap(
+	storageMap := inter.Storage().GetDomainStorageMap(
+		inter,
 		address,
 		common.StorageDomainAccountCapability,
 		true,
@@ -3429,7 +3433,8 @@ func unrecordAccountCapabilityController(
 
 	storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue)
 
-	storageMap := inter.Storage().GetStorageMap(
+	storageMap := inter.Storage().GetDomainStorageMap(
+		inter,
 		address,
 		common.StorageDomainAccountCapability,
 		true,
@@ -3448,7 +3453,8 @@ func getAccountCapabilityControllerIDsIterator(
 	nextCapabilityID func() (uint64, bool),
 	count uint64,
 ) {
-	storageMap := inter.Storage().GetStorageMap(
+	storageMap := inter.Storage().GetDomainStorageMap(
+		inter,
 		address,
 		common.StorageDomainAccountCapability,
 		false,
diff --git a/test_utils/interpreter_utils/interpreter.go b/test_utils/interpreter_utils/interpreter.go
index 46a8182023..e92a5264dc 100644
--- a/test_utils/interpreter_utils/interpreter.go
+++ b/test_utils/interpreter_utils/interpreter.go
@@ -29,14 +29,26 @@ import (
 
 func NewTestInterpreter(tb testing.TB) *interpreter.Interpreter {
 	storage := NewUnmeteredInMemoryStorage()
+	return NewTestInterpreterWithStorage(tb, storage)
+}
+
+func NewTestInterpreterWithStorage(tb testing.TB, storage interpreter.Storage) *interpreter.Interpreter {
+	return NewTestInterpreterWithStorageAndAtreeValidationConfig(tb, storage, true, true)
+}
 
+func NewTestInterpreterWithStorageAndAtreeValidationConfig(
+	tb testing.TB,
+	storage interpreter.Storage,
+	atreeValueValidationEnabled bool,
+	atreeStorageValidationEnabled bool,
+) *interpreter.Interpreter {
 	inter, err := interpreter.NewInterpreter(
 		nil,
 		TestLocation,
 		&interpreter.Config{
 			Storage:                       storage,
-			AtreeValueValidationEnabled:   true,
-			AtreeStorageValidationEnabled: true,
+			AtreeValueValidationEnabled:   atreeValueValidationEnabled,
+			AtreeStorageValidationEnabled: atreeStorageValidationEnabled,
 		},
 	)
 	require.NoError(tb, err)
diff --git a/test_utils/runtime_utils/storage.go b/test_utils/runtime_utils/storage.go
new file mode 100644
index 0000000000..0c971d8c5d
--- /dev/null
+++ b/test_utils/runtime_utils/storage.go
@@ -0,0 +1,42 @@
+/*
+ * Cadence - The resource-oriented smart contract programming language
+ *
+ * Copyright Flow Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package runtime_utils
+
+import (
+	"testing"
+
+	"github.com/onflow/atree"
+	"github.com/stretchr/testify/require"
+)
+
+func CheckAtreeStorageHealth(tb testing.TB, storage atree.SlabStorage, expectedRootSlabIDs []atree.SlabID) {
+	rootSlabIDs, err := atree.CheckStorageHealth(storage, -1)
+	require.NoError(tb, err)
+
+	nonTempRootSlabIDs := make([]atree.SlabID, 0, len(rootSlabIDs))
+
+	for rootSlabID := range rootSlabIDs { //nolint:maprange
+		if rootSlabID.HasTempAddress() {
+			continue
+		}
+		nonTempRootSlabIDs = append(nonTempRootSlabIDs, rootSlabID)
+	}
+
+	require.ElementsMatch(tb, nonTempRootSlabIDs, expectedRootSlabIDs)
+}
diff --git a/test_utils/runtime_utils/testledger.go b/test_utils/runtime_utils/testledger.go
index 4d4c846172..ef77d134f8 100644
--- a/test_utils/runtime_utils/testledger.go
+++ b/test_utils/runtime_utils/testledger.go
@@ -31,6 +31,7 @@ import (
 
 type TestLedger struct {
 	StoredValues        map[string][]byte
+	StorageIndices      map[string]uint64
 	OnValueExists       func(owner, key []byte) (exists bool, err error)
 	OnGetValue          func(owner, key []byte) (value []byte, err error)
 	OnSetValue          func(owner, key, value []byte) (err error)
@@ -92,9 +93,30 @@ func (s TestLedger) Dump() {
 	}
 }
 
+type LedgerOnRead func(owner, key, value []byte)
+type LedgerOnWrite func(owner, key, value []byte)
+
+type OwnerKeyValue struct {
+	Owner, Key, Value []byte
+}
+
+var LedgerOnWriteCounter = func(counter *int) LedgerOnWrite {
+	return func(_, _, _ []byte) {
+		(*counter)++
+	}
+}
+
+var LedgerOnWriteEntries = func(entries *[]OwnerKeyValue) LedgerOnWrite {
+	return func(owner, key, value []byte) {
+		*entries = append(
+			*entries,
+			OwnerKeyValue{Owner: owner, Key: key, Value: value})
+	}
+}
+
 func NewTestLedger(
-	onRead func(owner, key, value []byte),
-	onWrite func(owner, key, value []byte),
+	onRead LedgerOnRead,
+	onWrite LedgerOnWrite,
 ) TestLedger {
 
 	storedValues := map[string][]byte{}
@@ -102,7 +124,8 @@ func NewTestLedger(
 	storageIndices := map[string]uint64{}
 
 	return TestLedger{
-		StoredValues: storedValues,
+		StoredValues:   storedValues,
+		StorageIndices: storageIndices,
 		OnValueExists: func(owner, key []byte) (bool, error) {
 			value := storedValues[TestStorageKey(string(owner), string(key))]
 			return len(value) > 0, nil
@@ -142,7 +165,8 @@ func NewTestLedgerWithData(
 	}
 
 	return TestLedger{
-		StoredValues: storedValues,
+		StoredValues:   storedValues,
+		StorageIndices: storageIndices,
 		OnValueExists: func(owner, key []byte) (bool, error) {
 			value := storedValues[storageKey(string(owner), string(key))]
 			return len(value) > 0, nil
diff --git a/tools/compatibility-check/go.mod b/tools/compatibility-check/go.mod
index 628bfdc184..ca326637cd 100644
--- a/tools/compatibility-check/go.mod
+++ b/tools/compatibility-check/go.mod
@@ -7,7 +7,7 @@ require (
 	github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0
 	github.com/onflow/flow-go v0.38.0-preview.0.0.20241018215103-774056466e36
 	github.com/rs/zerolog v1.29.0
-	github.com/stretchr/testify v1.9.0
+	github.com/stretchr/testify v1.10.0
 )
 
 require (
@@ -43,7 +43,7 @@ require (
 	github.com/multiformats/go-multibase v0.2.0 // indirect
 	github.com/multiformats/go-multihash v0.2.3 // indirect
 	github.com/multiformats/go-varint v0.0.7 // indirect
-	github.com/onflow/atree v0.8.0 // indirect
+	github.com/onflow/atree v0.8.1 // indirect
 	github.com/onflow/crypto v0.25.2 // indirect
 	github.com/onflow/flow-core-contracts/lib/go/templates v1.3.3-0.20241017220455-79fdc6c8ba53 // indirect
 	github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect
@@ -73,7 +73,7 @@ require (
 	github.com/vmihailenco/msgpack/v4 v4.3.11 // indirect
 	github.com/vmihailenco/tagparser v0.1.1 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	github.com/zeebo/blake3 v0.2.3 // indirect
+	github.com/zeebo/blake3 v0.2.4 // indirect
 	go.opentelemetry.io/otel v1.24.0 // indirect
 	golang.org/x/crypto v0.28.0 // indirect
 	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
diff --git a/tools/compatibility-check/go.sum b/tools/compatibility-check/go.sum
index 3c453fa707..39d0ed9012 100644
--- a/tools/compatibility-check/go.sum
+++ b/tools/compatibility-check/go.sum
@@ -278,7 +278,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
 github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
-github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
 github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
 github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -343,8 +342,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n
 github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0=
-github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo=
+github.com/onflow/atree v0.8.1 h1:DAnPnL9/Ks3LaAnkQVokokTBG/znTW0DJfovDtJDhLI=
+github.com/onflow/atree v0.8.1/go.mod h1:FT6udJF9Q7VQTu3wknDhFX+VV4D44ZGdqtTAE5iztck=
 github.com/onflow/crypto v0.25.2 h1:GjHunqVt+vPcdqhxxhAXiMIF3YiLX7gTuTR5O+VG2ns=
 github.com/onflow/crypto v0.25.2/go.mod h1:fY7eLqUdMKV8EGOw301unP8h7PvLVy8/6gVR++/g0BY=
 github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 h1:R86HaOuk6vpuECZnriEUE7bw9inC2AtdSn8lL/iwQLQ=
@@ -454,8 +453,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
 github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg=
@@ -480,11 +479,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
 github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
 github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
-github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
-github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
+github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
+github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE=
 github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
 github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
diff --git a/tools/storage-explorer/main.go b/tools/storage-explorer/main.go
index fe4f80504f..614143a5f7 100644
--- a/tools/storage-explorer/main.go
+++ b/tools/storage-explorer/main.go
@@ -184,7 +184,7 @@ func NewAccountStorageMapKeysHandler(
 		}
 
 		var keys []string
-		storageMap := storage.GetStorageMap(address, storageMapDomain, false)
+		storageMap := storage.GetDomainStorageMap(inter, address, storageMapDomain, false)
 		if storageMap == nil {
 			keys = make([]string, 0)
 		} else {
@@ -225,7 +225,7 @@ func NewAccountStorageMapValueHandler(
 			return
 		}
 
-		storageMap := storage.GetStorageMap(address, storageMapDomain, false)
+		storageMap := storage.GetDomainStorageMap(inter, address, storageMapDomain, false)
 		if storageMap == nil {
 			http.Error(w, "storage map does not exist", http.StatusNotFound)
 			return