sharding: Made Requested Changes (ethereum#92)
nisdas committed May 16, 2018
1 parent a140f83 commit 434c511
Showing 4 changed files with 75 additions and 80 deletions.
28 changes: 16 additions & 12 deletions sharding/collation.go
@@ -136,17 +136,21 @@ func (c *Collation) CreateRawBlobs() ([]*utils.RawBlob, error) {
 
 }
 
-// ConvertBacktoTx converts raw blobs back to their original transactions.
-func (c *Collation) ConvertBacktoTx(rawblobs []utils.RawBlob) error {
+// ConvertBackToTx converts raw blobs back to their original transactions.
+func ConvertBackToTx(rawBlobs []utils.RawBlob) ([]*types.Transaction, error) {
 
-    for i := 0; i < len(rawblobs); i++ {
+    blobs := make([]*types.Transaction, len(rawBlobs))
 
-        err := utils.ConvertfromRawBlob(&rawblobs[i], c.transactions[i])
+    for i := 0; i < len(rawBlobs); i++ {
+
+        blobs[i] = types.NewTransaction(0, common.HexToAddress("0x"), nil, 0, nil, nil)
+
+        err := utils.ConvertFromRawBlob(&rawBlobs[i], blobs[i])
         if err != nil {
-            return fmt.Errorf("Creation of transactions from raw blobs failed %v", err)
+            return nil, fmt.Errorf("Creation of transactions from raw blobs failed %v", err)
         }
     }
-    return nil
+    return blobs, nil
 
 }
@@ -167,7 +171,7 @@ func (c *Collation) Serialize() ([]byte, error) {
 
     if int64(len(serializedTx)) > collationSizelimit {
 
-        return nil, fmt.Errorf("The serialized body exceeded the collation size limit", serializedTx)
+        return nil, fmt.Errorf("The serialized body exceeded the collation size limit: %v", serializedTx)
 
     }
 
@@ -176,18 +180,18 @@ func (c *Collation) Serialize() ([]byte, error) {
 }
 
 // Deserialize takes a byte array and converts its back to its original transactions.
-func (c *Collation) Deserialize(serialisedBlob []byte) error {
+func Deserialize(serialisedBlob []byte) (*[]*types.Transaction, error) {
 
     deserializedBlobs, err := utils.Deserialize(serialisedBlob)
     if err != nil {
-        return fmt.Errorf("%v", err)
+        return nil, fmt.Errorf("%v", err)
    }
 
-    err = c.ConvertBacktoTx(deserializedBlobs)
+    txs, err := ConvertBackToTx(deserializedBlobs)
 
     if err != nil {
-        return fmt.Errorf("%v", err)
+        return nil, fmt.Errorf("%v", err)
     }
 
-    return nil
+    return &txs, nil
 }
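
With this change, deserializing a collation body no longer mutates the Collation itself; Deserialize hands the recovered transactions back to the caller. A minimal round-trip sketch, assuming a *Collation whose transactions were already populated by the package's own constructors (the helper below is illustrative only and not part of this commit):

package sharding

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/types"
)

// roundTrip serializes a collation body and converts it back to transactions.
// Sketch only: it relies on the exported Serialize and Deserialize shown in the
// diff above (Deserialize calls ConvertBackToTx internally).
func roundTrip(c *Collation) ([]*types.Transaction, error) {
    serialized, err := c.Serialize()
    if err != nil {
        return nil, fmt.Errorf("serialize failed: %v", err)
    }

    // Deserialize returns *[]*types.Transaction after this commit.
    txs, err := Deserialize(serialized)
    if err != nil {
        return nil, fmt.Errorf("deserialize failed: %v", err)
    }
    return *txs, nil
}
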
51 changes: 24 additions & 27 deletions sharding/collation_test.go
@@ -4,6 +4,7 @@ import (
     "math/big"
     //"github.com/ethereum/go-ethereum/rlp"
     //"reflect"
+    "bytes"
     "reflect"
     "testing"
 
@@ -71,64 +72,60 @@ func TestSerialize_Deserialize(t *testing.T) {
         t.Errorf("Unable to Serialize transactions, %v", err)
     }
 
-    err = c.Deserialize(results)
+    deserializedTxs, err := Deserialize(results)
 
     if err != nil {
         t.Errorf("Unable to deserialize collation body, %v", err)
     }
 
-    if len(tx) != len(c.transactions) {
-        t.Errorf("Transaction length is different before and after serialization: %v, %v", len(tx), len(c.transactions))
+    if len(tx) != len(*deserializedTxs) {
+        t.Errorf("Transaction length is different before and after serialization: %v, %v", len(tx), len(*deserializedTxs))
     }
 
     for i := 0; i < len(tx); i++ {
 
-        aval := fieldAccess(tx[i], []string{"data", "AccountNonce"})
-        aval2 := fieldAccess(c.transactions[i], []string{"data", "AccountNonce"})
+        beforeSerialization := tx[i]
+        afterDeserialization := (*deserializedTxs)[i]
 
-        if aval != aval2 {
+        if beforeSerialization.Nonce() != afterDeserialization.Nonce() {
 
-            t.Errorf("Data before serialization and after deserialization are not the same: %v, %v", aval, aval2)
+            t.Errorf("Data before serialization and after deserialization are not the same ,AccountNonce: %v, %v", beforeSerialization.Nonce(), afterDeserialization.Nonce())
 
         }
 
-        gval := fieldAccess(tx[i], []string{"data", "GasLimit"})
-        gval2 := fieldAccess(c.transactions[i], []string{"data", "GasLimit"})
-        if gval != gval2 {
+        if beforeSerialization.Gas() != afterDeserialization.Gas() {
 
-            t.Errorf("Data before serialization and after deserialization are not the same: %v, %v", gval, gval2)
+            t.Errorf("Data before serialization and after deserialization are not the same ,GasLimit: %v, %v", beforeSerialization.Gas(), afterDeserialization.Gas())
 
         }
 
-        pval := fieldAccess(tx[i], []string{"data", "Price"})
-        pval2 := fieldAccess(c.transactions[i], []string{"data", "Price"})
-        if pval != pval2 {
+        if beforeSerialization.GasPrice().Cmp(afterDeserialization.GasPrice()) != 0 {
 
-            t.Errorf("Data before serialization and after deserialization are not the same: %v, %v", pval, pval2)
+            t.Errorf("Data before serialization and after deserialization are not the same ,Price: %v, %v", beforeSerialization.GasPrice(), afterDeserialization.GasPrice())
 
         }
 
-        rval := fieldAccess(tx[i], []string{"data", "Recipient"})
-        rval2 := fieldAccess(c.transactions[i], []string{"data", "Recipient"})
-        if rval != rval2 {
+        beforeAddress := reflect.ValueOf(beforeSerialization.To())
+        afterAddress := reflect.ValueOf(afterDeserialization.To())
 
-            t.Errorf("Data before serialization and after deserialization are not the same: %v, %v", rval, rval2)
+        if reflect.DeepEqual(beforeAddress, afterAddress) {
 
+            t.Errorf("Data before serialization and after deserialization are not the same ,Recipient: %v, %v", beforeAddress, afterAddress)
+
         }
 
-        amval := fieldAccess(tx[i], []string{"data", "Amount"})
-        amval2 := fieldAccess(c.transactions[i], []string{"data", "Amount"})
-        if amval != amval2 {
+        if beforeSerialization.Value().Cmp(afterDeserialization.Value()) != 0 {
 
-            t.Errorf("Data before serialization and after deserialization are not the same: %v, %v", amval, amval2)
+            t.Errorf("Data before serialization and after deserialization are not the same ,Amount: %v, %v", beforeSerialization.Value(), afterDeserialization.Value())
 
         }
 
-        paval := fieldAccess(tx[i], []string{"data", "Payload"})
-        paval2 := fieldAccess(c.transactions[i], []string{"data", "Payload"})
-        if paval != paval2 {
+        beforeData := beforeSerialization.Data()
+        afterData := afterDeserialization.Data()
 
+        if !bytes.Equal(beforeData, afterData) {
+
-            t.Errorf("Data before serialization and after deserialization are not the same: %v, %v", paval, paval2)
+            t.Errorf("Data before serialization and after deserialization are not the same ,Payload: %v, %v", beforeData, afterData)
 
         }
 
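
The per-field checks above could also be folded into a single helper. The sketch below is a hypothetical consolidation, not part of this commit; it compares the same fields through the transaction getters, using Cmp for the big.Int fields, bytes.Equal for the payload, and a nil-safe comparison for the recipient:

package sharding

import (
    "bytes"

    "github.com/ethereum/go-ethereum/core/types"
)

// txFieldsEqual is an illustrative helper only; it mirrors the field checks in
// TestSerialize_Deserialize above.
func txFieldsEqual(a, b *types.Transaction) bool {
    if a.Nonce() != b.Nonce() || a.Gas() != b.Gas() {
        return false
    }
    if a.GasPrice().Cmp(b.GasPrice()) != 0 || a.Value().Cmp(b.Value()) != 0 {
        return false
    }
    // To() returns nil for contract-creation transactions, so guard before dereferencing.
    switch {
    case a.To() == nil && b.To() == nil:
        // both nil: recipients match
    case a.To() == nil || b.To() == nil:
        return false
    case *a.To() != *b.To():
        return false
    }
    return bytes.Equal(a.Data(), b.Data())
}
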
66 changes: 30 additions & 36 deletions sharding/utils/marshal.go
@@ -23,16 +23,16 @@ type RawBlob struct {
 }
 
 // NewRawBlob builds a raw blob from any interface by using RLP encoding
-func NewRawBlob(i interface{}, skipevm bool) (*RawBlob, error) {
+func NewRawBlob(i interface{}, skipEvm bool) (*RawBlob, error) {
     data, err := rlp.EncodeToBytes(i)
     if err != nil {
         return nil, fmt.Errorf("RLP encoding was a failure:%v", err)
     }
-    return &RawBlob{data: data, flags: Flags{skipEvmExecution: skipevm}}, nil
+    return &RawBlob{data: data, flags: Flags{skipEvmExecution: skipEvm}}, nil
 }
 
-// ConvertfromRawBlob converts raw blob back from a byte array to its interface
-func ConvertfromRawBlob(blob *RawBlob, i interface{}) error {
+// ConvertFromRawBlob converts raw blob back from a byte array to its interface
+func ConvertFromRawBlob(blob *RawBlob, i interface{}) error {
     data := (*blob).data
     err := rlp.DecodeBytes(data, i)
     if err != nil {
@@ -42,13 +42,7 @@ func ConvertfromRawBlob(blob *RawBlob, i interface{}) error {
     return nil
 }
 
-// ConvertToRawBlob will convert any supported type into a the RawBlob type.
-func ConvertToRawBlob(arg interface{}) ([]RawBlob, error) {
-    //TODO: will be done in part 2 to convert any type to a rawBlob
-    return nil, nil
-}
-
-// serializeBlob parses the blob and serializes it appropriately.
+// SerializeBlob parses the blob and serializes it appropriately.
 func SerializeBlob(cb RawBlob) ([]byte, error) {
 
     length := int64(len(cb.data))
@@ -59,18 +53,18 @@ func SerializeBlob(cb RawBlob) ([]byte, error) {
     if cb.flags.skipEvmExecution {
         indicatorByte[0] |= (1 << 7)
     }
-    tempbody := []byte{}
+    tempBody := []byte{}
 
     // if blob is less than 31 bytes, it adds the indicator chunk and pads the remaining empty bytes to the right
 
     if chunksNumber == 0 {
-        paddedbytes := make([]byte, (chunkDataSize - length))
+        paddedBytes := make([]byte, (chunkDataSize - length))
         indicatorByte[0] = byte(terminalLength)
         if cb.flags.skipEvmExecution {
             indicatorByte[0] |= (1 << 7)
         }
-        tempbody = append(indicatorByte, append(cb.data, paddedbytes...)...)
-        return tempbody, nil
+        tempBody = append(indicatorByte, append(cb.data, paddedBytes...)...)
+        return tempBody, nil
     }
 
     //if there is no need to pad empty bytes, then the indicator byte is added as 00011111
@@ -83,7 +77,7 @@ func SerializeBlob(cb RawBlob) ([]byte, error) {
    // is created by appending the indcator byte to the data chunks. The data chunks are separated into sets of
    // 31
 
-        tempbody = append(tempbody,
+        tempBody = append(tempBody,
             append(indicatorByte,
                 cb.data[(i-1)*chunkDataSize:i*chunkDataSize]...)...)
 
@@ -94,11 +88,11 @@ func SerializeBlob(cb RawBlob) ([]byte, error) {
     }
 
     // Terminal chunk has its indicator byte added, chunkDataSize*chunksNumber refers to the total size of the blob
-    tempbody = append(tempbody,
+    tempBody = append(tempBody,
         append(indicatorByte,
             cb.data[(chunksNumber-1)*chunkDataSize:chunkDataSize*chunksNumber]...)...)
 
-    return tempbody, nil
+    return tempBody, nil
 
 }
 
@@ -108,7 +102,7 @@ func SerializeBlob(cb RawBlob) ([]byte, error) {
 
     for i := int64(1); i <= chunksNumber; i++ {
 
-        tempbody = append(tempbody,
+        tempBody = append(tempBody,
             append(indicatorByte,
                 cb.data[(i-1)*chunkDataSize:i*chunkDataSize]...)...)
 
@@ -120,14 +114,14 @@ func SerializeBlob(cb RawBlob) ([]byte, error) {
     if cb.flags.skipEvmExecution {
         indicatorByte[0] |= (1 << 7)
     }
-    tempbody = append(tempbody,
+    tempBody = append(tempBody,
         append(indicatorByte,
             cb.data[chunkDataSize*chunksNumber:length]...)...)
 
     emptyBytes := make([]byte, (chunkDataSize - terminalLength))
-    tempbody = append(tempbody, emptyBytes...)
+    tempBody = append(tempBody, emptyBytes...)
 
-    return tempbody, nil
+    return tempBody, nil
 
 }
 
@@ -157,44 +151,44 @@ func Deserialize(data []byte) ([]RawBlob, error) {
     length := int64(len(data))
     chunksNumber := length / chunkSize
     indicatorByte := byte(0)
-    tempbody := RawBlob{}
-    var deserializedblob []RawBlob
+    tempBody := RawBlob{}
+    var deserializedBlob []RawBlob
 
     // This separates the byte array into its separate blobs
     for i := int64(1); i <= chunksNumber; i++ {
         indicatorIndex := (i - 1) * chunkSize
 
         // Tests if the chunk delimiter is zero, if it is it will append the data chunk
-        // to tempbody
+        // to tempBody
         if data[indicatorIndex] == indicatorByte || data[indicatorIndex] == byte(128) {
-            tempbody.data = append(tempbody.data, data[(indicatorIndex+1):(i)*chunkSize]...)
+            tempBody.data = append(tempBody.data, data[(indicatorIndex+1):(i)*chunkSize]...)
 
         } else if data[indicatorIndex] == byte(31) || data[indicatorIndex] == byte(159) {
             if data[indicatorIndex] == byte(159) {
-                tempbody.flags.skipEvmExecution = true
+                tempBody.flags.skipEvmExecution = true
             }
-            tempbody.data = append(tempbody.data, data[(indicatorIndex+1):indicatorIndex+1+chunkDataSize]...)
-            deserializedblob = append(deserializedblob, tempbody)
-            tempbody = RawBlob{}
+            tempBody.data = append(tempBody.data, data[(indicatorIndex+1):indicatorIndex+1+chunkDataSize]...)
+            deserializedBlob = append(deserializedBlob, tempBody)
+            tempBody = RawBlob{}
 
         } else {
             // Since the chunk delimiter in non-zero now we can infer that it is a terminal chunk and
-            // add it and append to the deserializedblob slice. The tempbody signifies a single deserialized blob
+            // add it and append to the deserializedblob slice. The tempBody signifies a single deserialized blob
             terminalIndex := int64(data[indicatorIndex])
             //Check if EVM flag is equal to 1
             flagindex := data[indicatorIndex] >> 7
             if flagindex == byte(1) {
                 terminalIndex = int64(data[indicatorIndex]) - 128
-                tempbody.flags.skipEvmExecution = true
+                tempBody.flags.skipEvmExecution = true
             }
-            tempbody.data = append(tempbody.data, data[(indicatorIndex+1):(indicatorIndex+1+terminalIndex)]...)
-            deserializedblob = append(deserializedblob, tempbody)
-            tempbody = RawBlob{}
+            tempBody.data = append(tempBody.data, data[(indicatorIndex+1):(indicatorIndex+1+terminalIndex)]...)
+            deserializedBlob = append(deserializedBlob, tempBody)
+            tempBody = RawBlob{}
 
         }
 
     }
 
-    return deserializedblob, nil
+    return deserializedBlob, nil
 
 }
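
SerializeBlob packs each RawBlob into fixed-size chunks: one indicator byte followed by chunkDataSize (31) data bytes, with the terminal chunk's indicator holding the remaining data length and bit 7 carrying the skipEvmExecution flag; Deserialize reverses this by walking the indicator bytes. A small round-trip sketch from outside the package (the import path is inferred from the file layout above, and the values are illustrative only):

package main

import (
    "fmt"
    "log"

    "github.com/ethereum/go-ethereum/sharding/utils"
)

func main() {
    // NewRawBlob RLP-encodes the value and records the skipEvmExecution flag.
    blob, err := utils.NewRawBlob([]byte{0xaa, 0xbb, 0xcc}, true)
    if err != nil {
        log.Fatal(err)
    }

    // SerializeBlob emits whole 32-byte chunks (1 indicator byte + 31 data bytes).
    serialized, err := utils.SerializeBlob(*blob)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("serialized into %d bytes (%d chunk(s))\n", len(serialized), len(serialized)/32)

    // Deserialize walks the indicator bytes and rebuilds the original RawBlobs,
    // restoring the skipEvmExecution flag from bit 7 of the terminal indicator.
    blobs, err := utils.Deserialize(serialized)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("recovered %d blob(s)\n", len(blobs))
}
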
10 changes: 5 additions & 5 deletions sharding/utils/marshal_test.go
@@ -6,11 +6,11 @@ import (
     "testing"
 )
 
-func buildrawblob(size int64) []RawBlob {
+func buildRawBlob(size int64) []RawBlob {
     tempbody := make([]RawBlob, size)
     for i := int64(0); i < size; i++ {
         var rawblob RawBlob
-        rawblob.data = buildblob(size)
+        rawblob.data = buildBlob(size)
         flagset := byte(rand.Int()) >> 7
         if flagset == byte(1) {
             rawblob.flags.skipEvmExecution = true
@@ -22,7 +22,7 @@ func buildrawblob(size int64) []RawBlob {
 
 }
 
-func buildblob(size int64) []byte {
+func buildBlob(size int64) []byte {
 
     tempbody := make([]byte, size)
     for i := int64(0); i < size; i++ {
@@ -35,7 +35,7 @@ func TestSize(t *testing.T) {
 func TestSize(t *testing.T) {
     for i := 0; i < 300; i++ {
         size := int64(i)
-        blob := buildrawblob(size)
+        blob := buildRawBlob(size)
         chunksafterSerialize := size / chunkDataSize
         terminalchunk := size % chunkDataSize
         if terminalchunk != 0 {
@@ -66,7 +66,7 @@ func TestSerializeAndDeserializeblob(t *testing.T) {
 
     for i := 1; i < 300; i++ {
 
-        blob := buildrawblob(int64(i))
+        blob := buildRawBlob(int64(i))
 
         drefbody := make([]*RawBlob, len(blob))
         for s := 0; s < len(blob); s++ {
