Make smoke tests check recently added data deduplication feature #350

Merged
20 changes: 11 additions & 9 deletions array.go
@@ -132,6 +132,7 @@ func (a *ArrayDataSlab) StoredValue(storage SlabStorage) (Value, error) {
}

var _ ArraySlab = &ArrayDataSlab{}
var _ ContainerStorable = &ArrayDataSlab{}

// ArrayMetaDataSlab is internal node, implementing ArraySlab.
type ArrayMetaDataSlab struct {
@@ -697,14 +698,14 @@ func DecodeInlinedArrayStorable(
}, nil
}

// encodeAsInlined encodes inlined array data slab. Encoding is
// EncodeAsElement encodes inlined array data slab. Encoding is
// version 1 with CBOR tag having tag number CBORTagInlinedArray,
// and tag content as 3-element array:
//
// +------------------+----------------+----------+
// | extra data index | value ID index | elements |
// +------------------+----------------+----------+
func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
func (a *ArrayDataSlab) EncodeAsElement(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error {
if a.extraData == nil {
return NewEncodingError(
fmt.Errorf("failed to encode non-root array data slab as inlined"))
@@ -754,7 +755,8 @@ func (a *ArrayDataSlab) encodeAsInlined(enc *Encoder, inlinedTypeInfo *inlinedEx
// element 2: array elements
err = a.encodeElements(enc, inlinedTypeInfo)
if err != nil {
return NewEncodingError(err)
// err is already categorized by ArrayDataSlab.encodeElements().
return err
}

err = enc.CBOR.Flush()
@@ -817,7 +819,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
return NewEncodingError(err)
}

if a.hasPointer() {
if a.HasPointer() {
h.setHasPointers()
}

@@ -885,7 +887,7 @@ func (a *ArrayDataSlab) Encode(enc *Encoder) error {
return nil
}

func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *inlinedExtraData) error {
func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *InlinedExtraData) error {
// Encode CBOR array size manually for fixed-size encoding

enc.Scratch[0] = 0x80 | 25
@@ -906,10 +908,10 @@ func (a *ArrayDataSlab) encodeElements(enc *Encoder, inlinedTypeInfo *inlinedExt

// Encode data slab content (array of elements)
for _, e := range a.elements {
err = encodeStorableAsElement(enc, e, inlinedTypeInfo)
err = EncodeStorableAsElement(enc, e, inlinedTypeInfo)
if err != nil {
// Wrap err as external error (if needed) because err is returned by Storable interface.
return wrapErrorfAsExternalErrorIfNeeded(err, "failed to encode array element")
// err is already categorized by EncodeStorableAsElement().
return err
}
}

@@ -1000,7 +1002,7 @@ func (a *ArrayDataSlab) Uninline(storage SlabStorage) error {
return nil
}

func (a *ArrayDataSlab) hasPointer() bool {
func (a *ArrayDataSlab) HasPointer() bool {
for _, e := range a.elements {
if hasPointer(e) {
return true
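The 3-element layout documented in EncodeAsElement can be sketched end to end with fxamacker/cbor's stream encoder. This is a minimal sketch, not part of the change: the tag number below is a placeholder standing in for atree's CBORTagInlinedArray, and the index values are dummies.

package main

import (
	"bytes"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// placeholderInlinedArrayTag stands in for atree's CBORTagInlinedArray;
// its real value is defined in the library, not in this diff.
const placeholderInlinedArrayTag = 250

func main() {
	em, err := cbor.EncOptions{}.EncMode()
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	enc := em.NewStreamEncoder(&buf)

	// Tag content is a 3-element array:
	// +------------------+----------------+----------+
	// | extra data index | value ID index | elements |
	// +------------------+----------------+----------+
	_ = enc.EncodeTagHead(placeholderInlinedArrayTag)
	_ = enc.EncodeArrayHead(3)
	_ = enc.EncodeInt64(0)     // element 0: extra data index (dummy)
	_ = enc.EncodeInt64(0)     // element 1: value ID index (dummy)
	_ = enc.EncodeArrayHead(0) // element 2: inlined elements (empty here)
	_ = enc.Flush()

	fmt.Printf("inlined array element encoding: % x\n", buf.Bytes())
}
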
21 changes: 17 additions & 4 deletions array_debug.go
@@ -345,8 +345,16 @@ func (v *arrayVerifier) verifyDataSlab(
}

// Verify that only root data slab can be inlined
if level > 0 && dataSlab.Inlined() {
return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id))
if dataSlab.Inlined() {
if level > 0 {
return 0, nil, nil, NewFatalError(fmt.Errorf("non-root slab %s is inlined", id))
}
if dataSlab.extraData == nil {
return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s doesn't have extra data", id))
}
if dataSlab.next != SlabIDUndefined {
return 0, nil, nil, NewFatalError(fmt.Errorf("inlined slab %s has next slab ID", id))
}
}

// Verify that aggregated element size + slab prefix is the same as header.size
@@ -524,6 +532,11 @@ func VerifyArraySerialization(
decodeTypeInfo TypeInfoDecoder,
compare StorableComparator,
) error {
// Skip verification of inlined array serialization.
if a.Inlined() {
return nil
}

v := &serializationVerifier{
storage: a.Storage,
cborDecMode: cborDecMode,
@@ -550,7 +563,7 @@ func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error {
id := slab.SlabID()

// Encode slab
data, err := Encode(slab, v.cborEncMode)
data, err := EncodeSlab(slab, v.cborEncMode)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by Encode().
return err
@@ -564,7 +577,7 @@ func (v *serializationVerifier) verifyArraySlab(slab ArraySlab) error {
}

// Re-encode decoded slab
dataFromDecodedSlab, err := Encode(decodedSlab, v.cborEncMode)
dataFromDecodedSlab, err := EncodeSlab(decodedSlab, v.cborEncMode)
if err != nil {
// Don't need to wrap error as external error because err is already categorized by Encode().
return err
24 changes: 24 additions & 0 deletions array_test.go
@@ -142,6 +142,30 @@ func _testArray(
require.Equal(t, 1, len(rootIDs))
require.Equal(t, array.SlabID(), rootIDs[0])

// Encode all non-nil slabs
encodedSlabs := make(map[SlabID][]byte)
for id, slab := range storage.deltas {
if slab != nil {
b, err := EncodeSlab(slab, storage.cborEncMode)
require.NoError(t, err)
encodedSlabs[id] = b
}
}

// Test decoded array from new storage to force slab decoding
decodedArray, err := NewArrayWithRootID(
newTestPersistentStorageWithBaseStorageAndDeltas(t, storage.baseStorage, encodedSlabs),
array.SlabID())
require.NoError(t, err)

// Verify decoded array elements
for i, expected := range expectedValues {
actual, err := decodedArray.Get(uint64(i))
require.NoError(t, err)

valueEqual(t, expected, actual)
}

if !hasNestedArrayMapElement {
// Need to call Commit before calling storage.Count() for PersistentSlabStorage.
err = storage.Commit()
125 changes: 121 additions & 4 deletions cmd/stress/typeinfo.go
@@ -30,8 +30,9 @@ const (
maxArrayTypeValue = 10
maxMapTypeValue = 10

arrayTypeTagNum = 246
mapTypeTagNum = 245
arrayTypeTagNum = 246
mapTypeTagNum = 245
compositeTypeTagNum = 244
)

type arrayTypeInfo struct {
@@ -52,7 +53,7 @@ func (i arrayTypeInfo) IsComposite() bool {
return false
}

func (i arrayTypeInfo) ID() string {
func (i arrayTypeInfo) Identifier() string {
return fmt.Sprintf("array(%d)", i)
}

@@ -87,7 +88,7 @@ func (i mapTypeInfo) IsComposite() bool {
return false
}

func (i mapTypeInfo) ID() string {
func (i mapTypeInfo) Identifier() string {
return fmt.Sprintf("map(%d)", i)
}

@@ -104,6 +105,83 @@ func (i mapTypeInfo) Equal(other atree.TypeInfo) bool {
return ok && i.value == otherMapTypeInfo.value
}

var compositeFieldNames = []string{"a", "b", "c"}

type compositeTypeInfo struct {
fieldStartIndex int // inclusive start index of fieldNames
fieldEndIndex int // exclusive end index of fieldNames
}

var _ atree.TypeInfo = compositeTypeInfo{}

// newCompositeTypeInfo creates one of 10 possible compositeTypeInfo values at random.
// 10 possible composites:
// - ID: composite(0_0), field names: []
// - ID: composite(0_1), field names: ["a"]
// - ID: composite(0_2), field names: ["a", "b"]
// - ID: composite(0_3), field names: ["a", "b", "c"]
// - ID: composite(1_1), field names: []
// - ID: composite(1_2), field names: ["b"]
// - ID: composite(1_3), field names: ["b", "c"]
// - ID: composite(2_2), field names: []
// - ID: composite(2_3), field names: ["c"]
// - ID: composite(3_3), field names: []
func newCompositeTypeInfo() compositeTypeInfo {
// startIndex is [0, 3]
startIndex := r.Intn(len(compositeFieldNames) + 1)

// count is [0, 3]
count := r.Intn(len(compositeFieldNames) - startIndex + 1)

endIndex := startIndex + count
if endIndex > len(compositeFieldNames) {
panic("not reachable")
}

return compositeTypeInfo{fieldStartIndex: startIndex, fieldEndIndex: endIndex}
}

func (i compositeTypeInfo) getFieldNames() []string {
return compositeFieldNames[i.fieldStartIndex:i.fieldEndIndex]
}

func (i compositeTypeInfo) Copy() atree.TypeInfo {
return i
}

func (i compositeTypeInfo) IsComposite() bool {
return true
}

func (i compositeTypeInfo) Identifier() string {
return fmt.Sprintf("composite(%d_%d)", i.fieldStartIndex, i.fieldEndIndex)
}

func (i compositeTypeInfo) Encode(e *cbor.StreamEncoder) error {
err := e.EncodeTagHead(compositeTypeTagNum)
if err != nil {
return err
}
err = e.EncodeArrayHead(2)
if err != nil {
return err
}
err = e.EncodeInt64(int64(i.fieldStartIndex))
if err != nil {
return err
}
return e.EncodeInt64(int64(i.fieldEndIndex))
}

func (i compositeTypeInfo) Equal(other atree.TypeInfo) bool {
otherCompositeTypeInfo, ok := other.(compositeTypeInfo)
if !ok {
return false
}
return i.fieldStartIndex == otherCompositeTypeInfo.fieldStartIndex &&
i.fieldEndIndex == otherCompositeTypeInfo.fieldEndIndex
}

func decodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) {
num, err := dec.DecodeTagNumber()
if err != nil {
@@ -126,6 +204,45 @@ func decodeTypeInfo(dec *cbor.StreamDecoder) (atree.TypeInfo, error) {

return mapTypeInfo{value: int(value)}, nil

case compositeTypeTagNum:
count, err := dec.DecodeArrayHead()
if err != nil {
return nil, err
}
if count != 2 {
return nil, fmt.Errorf(
"failed to decode composite type info: expect 2 elemets, got %d elements",
count,
)
}

startIndex, err := dec.DecodeInt64()
if err != nil {
return nil, err
}

endIndex, err := dec.DecodeInt64()
if err != nil {
return nil, err
}

if endIndex < startIndex {
return nil, fmt.Errorf(
"failed to decode composite type info: endIndex %d < startIndex %d",
endIndex,
startIndex,
)
}

if endIndex > int64(len(compositeFieldNames)) {
return nil, fmt.Errorf(
"failed to decode composite type info: endIndex %d > len(compositeFieldNames) %d",
endIndex,
len(compositeFieldNames))
}

return compositeTypeInfo{fieldStartIndex: int(startIndex), fieldEndIndex: int(endIndex)}, nil

default:
return nil, fmt.Errorf("failed to decode type info with tag number %d", num)
}
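The new compositeTypeInfo codec can be exercised with a small round-trip check. A minimal sketch, assuming it sits in the same cmd/stress package (so compositeTypeInfo and decodeTypeInfo are in scope) and that the bytes and fxamacker/cbor/v2 packages are imported; roundTripCompositeTypeInfo is a hypothetical helper, not part of the change:

// roundTripCompositeTypeInfo encodes a compositeTypeInfo with Encode,
// decodes the bytes with decodeTypeInfo, and reports whether the decoded
// value equals the original.
func roundTripCompositeTypeInfo(info compositeTypeInfo) (bool, error) {
	em, err := cbor.EncOptions{}.EncMode()
	if err != nil {
		return false, err
	}
	dm, err := cbor.DecOptions{}.DecMode()
	if err != nil {
		return false, err
	}

	var buf bytes.Buffer
	enc := em.NewStreamEncoder(&buf)
	if err := info.Encode(enc); err != nil {
		return false, err
	}
	if err := enc.Flush(); err != nil {
		return false, err
	}

	dec := dm.NewStreamDecoder(&buf)
	decoded, err := decodeTypeInfo(dec)
	if err != nil {
		return false, err
	}

	return info.Equal(decoded), nil
}
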
50 changes: 49 additions & 1 deletion cmd/stress/utils.go
@@ -47,6 +47,7 @@ const (
const (
arrayType int = iota
mapType
compositeType
maxContainerValueType
)

@@ -123,6 +124,9 @@ func generateContainerValue(
length := r.Intn(maxNestedMapSize)
return newMap(storage, address, length, nestedLevels)

case compositeType:
return newComposite(storage, address, nestedLevels)

default:
return nil, nil, fmt.Errorf("unexpected randome container value type %d", valueType)
}
@@ -385,6 +389,50 @@
return expectedValues, m, nil
}

// newComposite creates an atree.OrderedMap with a random composite type and elements of the given nested level.
func newComposite(
storage atree.SlabStorage,
address atree.Address,
nestedLevel int,
) (mapValue, *atree.OrderedMap, error) {

compositeType := newCompositeTypeInfo()

m, err := atree.NewMap(storage, address, atree.NewDefaultDigesterBuilder(), compositeType)
if err != nil {
return nil, nil, fmt.Errorf("failed to create new map: %w", err)
}

expectedValues := make(mapValue)

for _, name := range compositeType.getFieldNames() {

expectedKey, key := NewStringValue(name), NewStringValue(name)

expectedValue, value, err := randomValue(storage, address, nestedLevel-1)
if err != nil {
return nil, nil, err
}

expectedValues[expectedKey] = expectedValue

existingStorable, err := m.Set(compare, hashInputProvider, key, value)
if err != nil {
return nil, nil, err
}
if existingStorable != nil {
return nil, nil, fmt.Errorf("failed to create new map of composite type: found duplicate field name %s", name)
}
}

err = checkMapDataLoss(expectedValues, m)
if err != nil {
return nil, nil, err
}

return expectedValues, m, nil
}

type InMemBaseStorage struct {
segments map[atree.SlabID][]byte
storageIndex map[atree.Address]atree.SlabIndex
@@ -492,5 +540,5 @@ func (v mapValue) Storable(atree.SlabStorage, atree.Address, uint64) (atree.Stor
}

var typeInfoComparator = func(a atree.TypeInfo, b atree.TypeInfo) bool {
return a.ID() == b.ID()
return a.Identifier() == b.Identifier()
}
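
After the ID() to Identifier() rename, typeInfoComparator still compares type infos purely by identifier string, so two compositeTypeInfo values covering the same field-name range are treated as the same type. A minimal sketch in the same package; sameCompositeType is a hypothetical helper for illustration only:

// sameCompositeType shows that two composite type infos with the same
// field-name range produce the same identifier ("composite(0_2)" here),
// so typeInfoComparator reports them as equal.
func sameCompositeType() bool {
	a := compositeTypeInfo{fieldStartIndex: 0, fieldEndIndex: 2} // fields ["a", "b"]
	b := compositeTypeInfo{fieldStartIndex: 0, fieldEndIndex: 2}
	return typeInfoComparator(a, b) // true
}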