diff --git a/.github/workflows/gobra.yml b/.github/workflows/gobra.yml index 6c1467bad..d88022b09 100644 --- a/.github/workflows/gobra.yml +++ b/.github/workflows/gobra.yml @@ -431,5 +431,4 @@ jobs: useZ3API: ${{ env.useZ3API }} disableNL: '0' viperBackend: ${{ env.viperBackend }} - unsafeWildcardOptimization: '0' - + unsafeWildcardOptimization: '0' \ No newline at end of file diff --git a/pkg/slayers/path/hopfield.go b/pkg/slayers/path/hopfield.go index c26b6d5e1..57452ac26 100644 --- a/pkg/slayers/path/hopfield.go +++ b/pkg/slayers/path/hopfield.go @@ -79,8 +79,8 @@ type HopField struct { // @ preserves acc(sl.Bytes(raw, 0, HopLen), R45) // @ ensures h.Mem() // @ ensures err == nil -// @ ensures unfolding h.Mem() in -// @ BytesToIO_HF(raw, 0, 0, HopLen) == h.ToIO_HF() +// @ ensures BytesToIO_HF(raw, 0, 0, HopLen) == +// @ unfolding acc(h.Mem(), R10) in h.ToIO_HF() // @ decreases func (h *HopField) DecodeFromBytes(raw []byte) (err error) { if len(raw) < HopLen { @@ -114,16 +114,13 @@ func (h *HopField) DecodeFromBytes(raw []byte) (err error) { // @ preserves acc(h.Mem(), R10) // @ preserves sl.Bytes(b, 0, HopLen) // @ ensures err == nil +// @ ensures BytesToIO_HF(b, 0, 0, HopLen) == +// @ unfolding acc(h.Mem(), R10) in h.ToIO_HF() // @ decreases func (h *HopField) SerializeTo(b []byte) (err error) { if len(b) < HopLen { return serrors.New("buffer for HopField too short", "expected", MacLen, "actual", len(b)) } - //@ requires len(b) >= HopLen - //@ preserves acc(h.Mem(), R11) - //@ preserves sl.Bytes(b, 0, HopLen) - //@ decreases - //@ outline( //@ unfold sl.Bytes(b, 0, HopLen) //@ unfold acc(h.Mem(), R11) b[0] = 0 @@ -139,24 +136,17 @@ func (h *HopField) SerializeTo(b []byte) (err error) { //@ assert &b[4:6][0] == &b[4] && &b[4:6][1] == &b[5] binary.BigEndian.PutUint16(b[4:6], h.ConsEgress) //@ assert forall i int :: { &b[i] } 0 <= i && i < HopLen ==> acc(&b[i]) - //@ fold sl.Bytes(b, 0, HopLen) - //@ fold acc(h.Mem(), R11) - //@ ) - //@ requires len(b) >= HopLen - //@ preserves acc(h.Mem(), R11) - //@ preserves sl.Bytes(b, 0, HopLen) - //@ decreases - //@ outline( - //@ unfold sl.Bytes(b, 0, HopLen) - //@ unfold acc(h.Mem(), R11) //@ assert forall i int :: { &h.Mac[:][i] } 0 <= i && i < len(h.Mac) ==> //@ &h.Mac[i] == &h.Mac[:][i] //@ assert forall i int :: { &b[6:6+MacLen][i] }{ &b[i+6] } 0 <= i && i < MacLen ==> //@ &b[6:6+MacLen][i] == &b[i+6] - copy(b[6:6+MacLen], h.Mac[:] /*@, R11 @*/) + copy(b[6:6+MacLen], h.Mac[:] /*@, R47 @*/) + //@ assert forall i int :: {&h.Mac[:][i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == b[6:6+MacLen][i] + //@ assert forall i int :: {&h.Mac[i]} 0 <= i && i < MacLen ==> h.Mac[:][i] == h.Mac[i] + //@ EqualBytesImplyEqualMac(b[6:6+MacLen], h.Mac) //@ fold sl.Bytes(b, 0, HopLen) + //@ assert h.ToIO_HF() == BytesToIO_HF(b, 0, 0, HopLen) //@ fold acc(h.Mem(), R11) - //@ ) return nil } diff --git a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra index 09e421953..04aa00308 100644 --- a/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra +++ b/pkg/slayers/path/scion/info_hop_setter_lemmas.gobra @@ -45,7 +45,7 @@ pure func InfofieldByteSlice(raw []byte, currInfIdx int) ([]byte) { return let infOffset := currInfIdx == 4 ? 
path.InfoFieldOffset(0, MetaLen) : path.InfoFieldOffset(currInfIdx, MetaLen) in - raw[infOffset:infOffset+path.InfoLen] + raw[infOffset:infOffset + path.InfoLen] } // HopfieldsStartIdx returns the index of the first byte of the hopfields of a segment @@ -61,8 +61,8 @@ pure func HopfieldsStartIdx(currInfIdx int, segs io.SegLens) int { return let numInf := segs.NumInfoFields() in let infOffset := path.InfoFieldOffset(numInf, MetaLen) in (currInfIdx == 0 || currInfIdx == 4) ? infOffset : - currInfIdx == 1 ? infOffset+segs.Seg1Len*path.HopLen : - infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen + currInfIdx == 1 ? infOffset + segs.Seg1Len * path.HopLen : + infOffset + (segs.Seg1Len + segs.Seg2Len) * path.HopLen } // HopfieldsEndIdx returns the index of the last byte of the hopfields of a segment @@ -77,9 +77,9 @@ decreases pure func HopfieldsEndIdx(currInfIdx int, segs io.SegLens) int { return let numInf := segs.NumInfoFields() in let infOffset := path.InfoFieldOffset(numInf, MetaLen) in - (currInfIdx == 0 || currInfIdx == 4) ? infOffset+segs.Seg1Len*path.HopLen : - currInfIdx == 1 ? infOffset+(segs.Seg1Len+segs.Seg2Len)*path.HopLen : - infOffset+(segs.Seg1Len+segs.Seg2Len+segs.Seg3Len)*path.HopLen + (currInfIdx == 0 || currInfIdx == 4) ? infOffset + segs.Seg1Len * path.HopLen : + currInfIdx == 1 ? infOffset + (segs.Seg1Len + segs.Seg2Len) * path.HopLen : + infOffset + (segs.Seg1Len + segs.Seg2Len + segs.Seg3Len) * path.HopLen } // HopfieldsByteSlice returns the byte slice of the hopfields of a segment @@ -107,9 +107,9 @@ requires segs.Valid() requires PktLen(segs, MetaLen) <= len(raw) requires acc(sl.Bytes(raw, 0, len(raw)), p) ensures acc(sl.Bytes(raw[:HopfieldsStartIdx(0, segs)], 0, HopfieldsStartIdx(0, segs)), p) -ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len*path.HopLen), p) -ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len*path.HopLen), p) -ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len*path.HopLen), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 0, segs), 0, segs.Seg1Len * path.HopLen), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 1, segs), 0, segs.Seg2Len * path.HopLen), p) +ensures acc(sl.Bytes(HopfieldsByteSlice(raw, 2, segs), 0, segs.Seg3Len * path.HopLen), p) ensures acc(sl.Bytes(raw[HopfieldsEndIdx(2, segs):], 0, len(raw[HopfieldsEndIdx(2, segs):])), p) decreases func SliceBytesIntoSegments(raw []byte, segs io.SegLens, p perm) { @@ -152,7 +152,7 @@ func CombineBytesFromSegments(raw []byte, segs io.SegLens, p perm) { ghost requires 0 < p requires segs.Valid() -requires PktLen(segs, MetaLen) <= len(raw) +requires MetaLen + numInf * path.InfoLen <= len(raw) requires numInf == segs.NumInfoFields() requires acc(sl.Bytes(raw, 0, len(raw)), p) ensures acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) @@ -184,7 +184,7 @@ func SliceBytesIntoInfoFields(raw []byte, numInf int, segs io.SegLens, p perm) { ghost requires 0 < p requires segs.Valid() -requires PktLen(segs, MetaLen) <= len(raw) +requires MetaLen + numInf * path.InfoLen <= len(raw) requires numInf == segs.NumInfoFields() requires acc(sl.Bytes(raw[:MetaLen], 0, MetaLen), p) requires acc(sl.Bytes(InfofieldByteSlice(raw, 0), 0, path.InfoLen), p) @@ -220,7 +220,7 @@ ghost opaque requires 0 < SegLen requires 0 <= currHfIdx && currHfIdx <= SegLen -requires SegLen*path.HopLen == len(hopfields) +requires SegLen * path.HopLen == len(hopfields) requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) decreases pure func CurrSegWithInfo(hopfields []byte, currHfIdx int,
SegLen int, inf io.AbsInfoField) io.IO_seg3 { @@ -240,7 +240,7 @@ requires (currInfIdx == 1 && segs.Seg2Len > 0) || let start := HopfieldsStartIdx(currInfIdx, segs) in let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && - len(hopfields) == end-start && + len(hopfields) == end - start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func LeftSegWithInfo( @@ -267,7 +267,7 @@ requires (currInfIdx == 0 && segs.Seg2Len > 0) || let start := HopfieldsStartIdx(currInfIdx, segs) in let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && - len(hopfields) == end-start && + len(hopfields) == end - start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func RightSegWithInfo( @@ -294,7 +294,7 @@ requires (segs.Seg2Len > 0 && segs.Seg3Len > 0 && let start := HopfieldsStartIdx(currInfIdx, segs) in let end := HopfieldsEndIdx(currInfIdx, segs) in inf != none[io.AbsInfoField] && - len(hopfields) == end-start && + len(hopfields) == end - start && acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) decreases pure func MidSegWithInfo( @@ -318,10 +318,10 @@ requires offset + path.HopLen * SegLen <= len(raw) requires 0 <= currHfIdx && currHfIdx <= SegLen requires 0 <= currInfIdx && currInfIdx < 3 preserves acc(sl.Bytes(raw, 0, len(raw)), R50) -preserves acc(sl.Bytes(raw[offset:offset+SegLen*path.HopLen], 0, SegLen*path.HopLen), R50) +preserves acc(sl.Bytes(raw[offset:offset + SegLen * path.HopLen], 0, SegLen * path.HopLen), R50) preserves acc(sl.Bytes(InfofieldByteSlice(raw, currInfIdx), 0, path.InfoLen), R50) ensures let inf := path.BytesToAbsInfoField(InfofieldByteSlice(raw, currInfIdx), 0) in - CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) == + CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) == CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) decreases func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegLen int) { @@ -333,11 +333,11 @@ func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegL assert reveal path.BytesToAbsInfoField(raw, infOffset) == reveal path.BytesToAbsInfoField(infoBytes, 0) reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, SegLen, MetaLen) - reveal CurrSegWithInfo(raw[offset:offset+SegLen*path.HopLen], currHfIdx, SegLen, inf) + reveal CurrSegWithInfo(raw[offset:offset + SegLen * path.HopLen], currHfIdx, SegLen, inf) fold acc(sl.Bytes(raw, 0, len(raw)), R56) fold acc(sl.Bytes(infoBytes, 0, path.InfoLen), R56) widenSegment(raw, offset, currHfIdx, inf.AInfo, inf.UInfo, inf.ConsDir, - inf.Peer, SegLen, offset, offset+SegLen*path.HopLen) + inf.Peer, SegLen, offset, offset + SegLen * path.HopLen) } // UpdateCurrSegInfo proves that updating the infofield from inf1 to inf2 does not alter the hopfields @@ -345,7 +345,7 @@ func CurrSegEquality(raw []byte, offset int, currInfIdx int, currHfIdx int, SegL ghost requires 0 < SegLen requires 0 <= currHfIdx && currHfIdx <= SegLen -requires SegLen*path.HopLen == len(raw) +requires SegLen * path.HopLen == len(raw) preserves acc(sl.Bytes(raw, 0, len(raw)), R50) ensures CurrSegWithInfo(raw, currHfIdx, SegLen, inf1).UpdateCurrSeg(inf2) == CurrSegWithInfo(raw, currHfIdx, SegLen, inf2) @@ -543,4 +543,157 @@ func MidSegEquality(raw []byte, currInfIdx int, segs io.SegLens) { } else { reveal MidSegWithInfo(nil, currInfIdx, segs, none[io.AbsInfoField]) } +} + +// `BytesStoreCurrSeg(hopfields, currHfIdx, segLen, inf)` holds iff `hopfields` contains the 
+// serialization of the hopfields of the current segment, which has been traversed until the +// `currHfIdx`-th hop (out of `segLen` hops in total). +ghost +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +requires acc(sl.Bytes(hopfields, 0, len(hopfields)), R50) +requires let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) && + acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R50) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50) +decreases +pure func BytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf io.AbsInfoField) bool { + return let currseg := CurrSegWithInfo(hopfields, currHfIdx, segLen, inf) in + let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + len(currseg.Future) > 0 && + currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen) && + currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx - 1)) && + currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) && + currseg.AInfo == inf.AInfo && + currseg.UInfo == inf.UInfo && + currseg.ConsDir == inf.ConsDir && + currseg.Peer == inf.Peer +} + +// `EstablishBytesStoreCurrSeg` shows that the raw bytes containing all hopfields +// can be split into three slices, one that exclusively contains all past hopfields, one +// that exclusively contains all future ones and another one for the current hopfield. +// This helps in proving that the future and past hopfields remain unchanged when the +// current hopfield is modified. 
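+// As an illustrative sketch (assuming path.HopLen == 12, i.e. the 6 fixed bytes plus the
+// 6-byte MAC serialized in `SerializeTo` above): for segLen == 3 and currHfIdx == 1, the
+// three slices are hopfields[:12] (past), hopfields[12:24] (current), and
+// hopfields[24:36] (future).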
+ghost +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +preserves acc(sl.Bytes(hopfields, 0, len(hopfields)), R49) +preserves let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R49) && + acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R49) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R49) +ensures BytesStoreCurrSeg(hopfields, currHfIdx, segLen, inf) +decreases +func EstablishBytesStoreCurrSeg(hopfields []byte, currHfIdx int, segLen int, inf io.AbsInfoField) { + currseg := reveal CurrSegWithInfo(hopfields, currHfIdx, segLen, inf) + currHfStart := currHfIdx * path.HopLen + currHfEnd := currHfStart + path.HopLen + unfold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) + unfold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56) + hf := hopFields(hopfields, 0, 0, segLen) + hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54) + reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf) + assert len(currseg.Future) > 0 + assert currseg.Future[0] == path.BytesToIO_HF(hopfields[currHfStart:currHfEnd], 0, 0, path.HopLen) + splitHopFieldsInPastAndFuture(hopfields, currHfIdx, segLen) + assert currseg.Past == segPast(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) + assert currseg.Future[0] == hf[currHfIdx] + assert hf[currHfIdx:][1:] == hf[currHfIdx + 1:] + assert currseg.Future == hf[currHfIdx:] + assert currseg.Future[1:] == hopFields(hopfields[currHfEnd:], 0, 0, (segLen - currHfIdx - 1)) + assert currseg.History == segHistory(hopFields(hopfields[:currHfStart], 0, 0, currHfIdx)) + fold acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), R56) + fold acc(sl.Bytes(hopfields, 0, len(hopfields)), R56) +} + +// `splitHopFieldsInPastAndFuture` shows that the raw bytes containing all hopfields +// can be split into two slices, one that exclusively contains all past hopfields and another +// that exclusively contains all future ones. This helps in proving that the future and past +// hopfields remain unchanged when the current hopfield is modified.
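+// For example (again assuming path.HopLen == 12), with segLen == 3 and currHfIdx == 1
+// the lemma equates hopFields(hopfields, 0, 0, 3)[:1] with hopFields(hopfields[:12], 0, 0, 1)
+// and hopFields(hopfields, 0, 0, 3)[2:] with hopFields(hopfields[24:], 0, 0, 1).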
+ghost +requires 0 < segLen +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +preserves acc(sl.Bytes(hopfields, 0, len(hopfields)), R50) +preserves let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), R50) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), R50) +ensures let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + hopFields(hopfields, 0, 0, segLen)[:currHfIdx] == + hopFields(hopfields[:currHfStart], 0, 0, currHfIdx) && + hopFields(hopfields, 0, 0, segLen)[currHfIdx + 1:] == + hopFields(hopfields[currHfEnd:], 0, 0, segLen - currHfIdx - 1) +decreases +func splitHopFieldsInPastAndFuture(hopfields []byte, currHfIdx int, segLen int) { + currHfStart := currHfIdx * path.HopLen + currHfEnd := currHfStart + path.HopLen + hf := hopFields(hopfields, 0, 0, segLen) + hopFieldsBytePositionsLemma(hopfields, 0, 0, segLen, R54) + reveal hopFieldsBytePositions(hopfields, 0, 0, segLen, hf) + + hfPast := hopFields(hopfields, 0, 0, currHfIdx) + hopFieldsBytePositionsLemma(hopfields, 0, 0, currHfIdx, R54) + reveal hopFieldsBytePositions(hopfields, 0, 0, currHfIdx, hfPast) + widenHopFields(hopfields, 0, 0, currHfIdx, 0, currHfStart, R52) + + hfFuture := hopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1) + hopFieldsBytePositionsLemma(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, R54) + reveal hopFieldsBytePositions(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, hfFuture) + widenHopFields(hopfields, currHfEnd, 0, segLen - currHfIdx - 1, + currHfEnd, segLen * path.HopLen, R52) +} + +// `SplitHopfields` splits the permission to the raw bytes of a segment into the permission +// to the subslice containing all past hopfields, to the subslice containing the current hopfield, +// and to another containing all future hopfields.
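+// It is the inverse of `SplitHopfields`: splitting with
+// `SplitHopfields(hopfields, currHfIdx, segLen, p)`, updating the current hopfield, and
+// then combining with the same arguments restores the original permission
+// acc(sl.Bytes(hopfields, 0, len(hopfields)), p).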
+ghost +requires 0 < p +requires 0 <= currHfIdx && currHfIdx < segLen +requires segLen * path.HopLen == len(hopfields) +requires let currHfStart := currHfIdx * path.HopLen in + let currHfEnd := currHfStart + path.HopLen in + acc(sl.Bytes(hopfields[:currHfStart], 0, currHfStart), p) && + acc(sl.Bytes(hopfields[currHfStart:currHfEnd], 0, path.HopLen), p) && + acc(sl.Bytes(hopfields[currHfEnd:], 0, (segLen - currHfIdx - 1) * path.HopLen), p) +ensures acc(sl.Bytes(hopfields, 0, len(hopfields)), p) +decreases +func CombineHopfields(hopfields []byte, currHfIdx int, segLen int, p perm) { + currHfStart := currHfIdx * path.HopLen + currHfEnd := currHfStart + path.HopLen + sl.Unslice_Bytes(hopfields, currHfEnd, len(hopfields), p) + sl.Unslice_Bytes(hopfields, currHfStart, currHfEnd, p) + sl.Unslice_Bytes(hopfields, 0, currHfStart, p) + sl.CombineAtIndex_Bytes(hopfields, currHfStart, len(hopfields), currHfEnd, p) + sl.CombineAtIndex_Bytes(hopfields, 0, len(hopfields), currHfStart, p) } \ No newline at end of file diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index ca3595a5e..4ee0008c6 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -224,7 +224,7 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // pres for IO: // @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) // @ requires validPktMetaHdr(ubuf) -// @ requires len(s.absPkt(ubuf).CurrSeg.Future) > 0 +// @ requires s.absPkt(ubuf).PathNotFullyTraversed() // @ requires s.GetBase(ubuf).IsXoverSpec() ==> // @ s.absPkt(ubuf).LeftSeg != none[io.IO_seg3] // @ ensures sl.Bytes(ubuf, 0, len(ubuf)) @@ -529,16 +529,34 @@ func (s *Raw) GetCurrentHopField( /*@ ghost ubuf []byte @*/ ) (res path.HopField // SetHopField updates the HopField at a given index. // @ requires 0 <= idx -// @ preserves acc(s.Mem(ubuf), R20) -// @ preserves sl.Bytes(ubuf, 0, len(ubuf)) -// @ ensures r != nil ==> r.ErrorMem() +// @ requires acc(s.Mem(ubuf), R20) +// @ requires sl.Bytes(ubuf, 0, len(ubuf)) +// pres for IO: +// @ requires validPktMetaHdr(ubuf) +// @ requires s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ requires s.absPkt(ubuf).PathNotFullyTraversed() +// @ ensures acc(s.Mem(ubuf), R20) +// @ ensures sl.Bytes(ubuf, 0, len(ubuf)) +// @ ensures r != nil ==> r.ErrorMem() +// posts for IO: +// @ ensures r == nil ==> +// @ validPktMetaHdr(ubuf) && +// @ s.GetBase(ubuf).EqAbsHeader(ubuf) +// @ ensures r == nil && idx == int(old(s.GetCurrHF(ubuf))) ==> +// @ let oldPkt := old(s.absPkt(ubuf)) in +// @ let newPkt := oldPkt.UpdateHopField(hop.ToIO_HF()) in +// @ s.absPkt(ubuf) == newPkt // @ decreases +// @ #backend[exhaleMode(1)] func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) (r error) { - //@ share hop + // (VerifiedSCION) Due to an incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. 
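+	// The `/*@@@*/` marker below declares tmpHopField as a shared (addressable)
+	// variable, with the same effect as the `share` statement it replaces, so that
+	// ghost code can hold permissions to it and fold `tmpHopField.Mem()`.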
+ tmpHopField /*@@@*/ := hop + //@ path.AbsMacArrayCongruence(hop.Mac, tmpHopField.Mac) // (VerifiedSCION) Cannot assert bounds of uint: // https://github.com/viperproject/gobra/issues/192 - //@ assume 0 <= hop.ConsIngress && 0 <= hop.ConsEgress - //@ fold hop.Mem() + //@ assume 0 <= tmpHopField.ConsIngress && 0 <= tmpHopField.ConsEgress + //@ fold acc(tmpHopField.Mem(), R9) //@ unfold acc(s.Mem(ubuf), R20) //@ unfold acc(s.Base.Mem(), R20) if idx >= s.NumHops { @@ -552,11 +570,19 @@ func (s *Raw) SetHopField(hop path.HopField, idx int /*@, ghost ubuf []byte @*/) //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) //@ assert sl.Bytes(s.Raw, 0, len(s.Raw)) //@ sl.SplitRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) - ret := hop.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) + ret := tmpHopField.SerializeTo(s.Raw[hopOffset : hopOffset+path.HopLen]) //@ sl.CombineRange_Bytes(s.Raw, hopOffset, hopOffset+path.HopLen, writePerm) //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm) //@ fold acc(s.Base.Mem(), R20) //@ fold acc(s.Mem(ubuf), R20) + // (VerifiedSCION) The proof for these assumptions is provided in PR #361 + // (https://github.com/viperproject/VerifiedSCION/pull/361), which will + // be merged once the performance issues are resolved. + //@ TemporaryAssumeForIO(validPktMetaHdr(ubuf) && s.GetBase(ubuf).EqAbsHeader(ubuf)) + //@ TemporaryAssumeForIO(idx == int(old(s.GetCurrHF(ubuf))) ==> + //@ let oldPkt := old(s.absPkt(ubuf)) in + //@ let newPkt := oldPkt.UpdateHopField(hop.ToIO_HF()) in + //@ s.absPkt(ubuf) == newPkt) return ret } diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra index 7b9066698..9bf4537db 100644 --- a/pkg/slayers/path/scion/raw_spec.gobra +++ b/pkg/slayers/path/scion/raw_spec.gobra @@ -233,31 +233,21 @@ pure func hopFields( } ghost -requires -1 <= currHfIdx && currHfIdx < len(hopfields) -ensures len(res) == currHfIdx + 1 -decreases currHfIdx + 1 -pure func segPast(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_HF]) { - return currHfIdx == -1 ? - seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHfIdx]} ++ segPast(hopfields, currHfIdx - 1) +ensures len(res) == len(hopfields) +decreases len(hopfields) +pure func segPast(hopfields seq[io.IO_HF]) (res seq[io.IO_HF]) { + return len(hopfields) == 0 ? seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[len(hopfields)-1]} ++ segPast( + hopfields[:len(hopfields)-1]) } ghost -requires 0 <= currHfIdx && currHfIdx <= len(hopfields) -ensures len(res) == len(hopfields) - currHfIdx -decreases len(hopfields) - currHfIdx -pure func segFuture(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_HF]) { - return currHfIdx == len(hopfields) ? seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHfIdx]} ++ segFuture(hopfields, currHfIdx + 1) -} - -ghost -requires -1 <= currHfIdx && currHfIdx < len(hopfields) -ensures len(res) == currHfIdx + 1 -decreases currHfIdx + 1 -pure func segHistory(hopfields seq[io.IO_HF], currHfIdx int) (res seq[io.IO_ahi]) { - return currHfIdx == -1 ? seq[io.IO_ahi]{} : - seq[io.IO_ahi]{hopfields[currHfIdx].Toab()} ++ segHistory(hopfields, currHfIdx - 1) +ensures len(res) == len(hopfields) +decreases len(hopfields) +pure func segHistory(hopfields seq[io.IO_HF]) (res seq[io.IO_ahi]) { + return len(hopfields) == 0 ? 
seq[io.IO_ahi]{} : + seq[io.IO_ahi]{hopfields[len(hopfields)-1].Toab()} ++ segHistory( + hopfields[:len(hopfields)-1]) } ghost @@ -284,9 +274,9 @@ pure func segment(raw []byte, UInfo : uinfo, ConsDir : consDir, Peer : peer, - Past : segPast(hopfields, currHfIdx - 1), - Future : segFuture(hopfields, currHfIdx), - History : segHistory(hopfields, currHfIdx - 1), + Past : segPast(hopfields[:currHfIdx]), + Future : hopfields[currHfIdx:], + History : segHistory(hopfields[:currHfIdx]), } } @@ -408,7 +398,7 @@ requires acc(sl.Bytes(raw, 0, len(raw)), R56) decreases pure func RawBytesToMetaHdr(raw []byte) MetaHdr { return unfolding acc(sl.Bytes(raw, 0, len(raw)), R56) in - let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in + let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in DecodedFrom(hdr) } @@ -489,7 +479,7 @@ func (s *Raw) EstablishValidPktMetaHdr(ghost ub []byte) { ghost requires oldPkt.LeftSeg != none[io.IO_seg2] -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() decreases pure func AbsXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { return io.IO_Packet2 { @@ -501,7 +491,7 @@ pure func AbsXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { } ghost -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() decreases pure func AbsIncPath(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { return io.IO_Packet2 { @@ -652,7 +642,7 @@ func (s *Raw) XoverLemma(ubuf []byte) { ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func (s *Raw) EqAbsHopField(pkt io.IO_pkt2, hop io.IO_HF) bool { return let currHF := pkt.CurrSeg.Future[0] in @@ -675,7 +665,7 @@ preserves acc(s.Mem(ubuf), R53) preserves acc(sl.Bytes(ubuf, 0, len(ubuf)), R53) preserves validPktMetaHdr(ubuf) preserves s.GetBase(ubuf).EqAbsHeader(ubuf) -preserves len(s.absPkt(ubuf).CurrSeg.Future) > 0 +preserves s.absPkt(ubuf).PathNotFullyTraversed() preserves s.GetBase(ubuf).ValidCurrInfSpec() preserves s.GetBase(ubuf).ValidCurrHfSpec() preserves s.CorrectlyDecodedInf(ubuf, info) @@ -754,6 +744,9 @@ decreases func IncCurrSeg(raw []byte, offset int, currInfIdx int, currHfIdx int, segLen int) { currseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx, segLen, 0) incseg := reveal CurrSeg(raw, offset, currInfIdx, currHfIdx+1, segLen, 0) + hf := hopFields(raw, offset, 0, segLen) + hfPast := hf[:currHfIdx+1] + assert hfPast[:len(hfPast)-1] == hf[:currHfIdx] assert currseg.AInfo == incseg.AInfo assert currseg.UInfo == incseg.UInfo assert currseg.ConsDir == incseg.ConsDir diff --git a/router/dataplane.go b/router/dataplane.go index e06dfaa73..7b81f812e 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -1936,7 +1936,7 @@ func (p *scionPacketProcessor) packSCMP( // Postconditions for IO: // @ ensures reserr == nil ==> // @ slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) // @ ensures old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub) @@ -2058,7 +2058,7 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ ensures reserr == nil && !p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsEgress == p.ingressID) // contracts for IO-spec -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ 
requires p.EqAbsHopField(oldPkt) // @ requires p.EqAbsInfoField(oldPkt) // @ ensures reserr == nil ==> @@ -2275,7 +2275,7 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ // @ requires dp.Valid() // @ requires p.d.WellConfigured() // @ requires p.d.DpAgreesWithSpec(dp) -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ requires p.EqAbsHopField(oldPkt) // @ requires p.EqAbsInfoField(oldPkt) // @ requires p.segmentChange ==> @@ -2391,7 +2391,7 @@ func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, gh // @ requires acc(&p.ingressID, R21) // preconditions for IO: // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ requires len(absPkt(ub).CurrSeg.Future) > 0 +// @ requires absPkt(ub).PathNotFullyTraversed() // @ requires acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) // @ requires p.LastHopLen(ub) // @ requires p.EqAbsHopField(absPkt(ub)) @@ -2406,7 +2406,7 @@ // postconditions for IO: // @ ensures acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) // @ ensures err == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures err == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures err == nil ==> absPkt(ub).PathNotFullyTraversed() // @ ensures err == nil ==> // @ absPkt(ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(ub)), path.ifsToIO_ifs(p.ingressID)) // @ ensures err == nil ==> p.LastHopLen(ub) @@ -2520,7 +2520,7 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/ // @ ensures sl.Bytes(p.cachedMac, 0, len(p.cachedMac)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ requires p.EqAbsHopField(oldPkt) // @ requires p.EqAbsInfoField(oldPkt) // @ ensures reserr == nil ==> AbsVerifyCurrentMACConstraint(oldPkt, dp) @@ -2612,7 +2612,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( // @ requires !p.GetIsXoverSpec(ub) // Preconditions for IO: // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ requires len(absPkt(ub).CurrSeg.Future) > 0 +// @ requires absPkt(ub).PathNotFullyTraversed() // @ requires p.EqAbsHopField(absPkt(ub)) // @ requires p.EqAbsInfoField(absPkt(ub)) // @ ensures acc(&p.infoField) @@ -2721,7 +2721,7 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr // @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).Future) > 0 // @ ensures reserr == nil ==> len(get(old(absPkt(ub)).LeftSeg).History) == 0 // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == AbsDoXover(old(absPkt(ub))) @@ -2776,13 +2776,21 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scio // @ assert slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ assert absPkt(ub) == reveal AbsDoXover(old(absPkt(ub))) var err error - if p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil { + // (VerifiedSCION) Due to an
incompleteness (https://github.com/viperproject/gobra/issues/770), + // we introduce a temporary variable to be able to call `path.AbsMacArrayCongruence()`. + var tmpHopField path.HopField + if tmpHopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } + p.hopField = tmpHopField + // @ path.AbsMacArrayCongruence(p.hopField.Mac, tmpHopField.Mac) + // @ assert p.hopField.ToIO_HF() == tmpHopField.ToIO_HF() + // @ assert reveal p.path.CorrectlyDecodedHf(ubPath, tmpHopField) + // @ assert reveal p.path.CorrectlyDecodedHf(ubPath, p.hopField) if p.infoField, err = p.path.GetCurrentInfoField( /*@ ubPath @*/ ); err != nil { // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) @@ -2791,8 +2799,13 @@ func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost currBase scio return processResult{}, err } // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) - // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) + // @ absPktFutureLemma(ub) + // @ p.path.DecodingLemma(ubPath, p.infoField, p.hopField) + // @ assert reveal p.path.EqAbsInfoField(p.path.absPkt(ubPath), p.infoField.ToAbsInfoField()) + // @ assert reveal p.path.EqAbsHopField(p.path.absPkt(ubPath), p.hopField.ToIO_HF()) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return processResult{}, nil } @@ -2832,7 +2845,7 @@ func (p *scionPacketProcessor) ingressInterface( /*@ ghost ubPath []byte @*/ ) u // @ ensures acc(&p.infoField, R21) // @ ensures acc(&p.hopField, R21) // contracts for IO-spec -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ requires p.EqAbsInfoField(oldPkt) // @ requires p.EqAbsHopField(oldPkt) // @ ensures p.EqAbsInfoField(oldPkt) @@ -2862,7 +2875,7 @@ func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/ // @ reserr != nil && sl.Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec -// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires oldPkt.PathNotFullyTraversed() // @ requires p.EqAbsInfoField(oldPkt) // @ requires p.EqAbsHopField(oldPkt) // @ ensures reserr != nil && respr.OutPkt != nil ==> @@ -2927,12 +2940,12 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/ // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ requires p.DstIsLocalIngressID(ub) // @ requires p.LastHopLen(ub) -// @ requires len(absPkt(ub).CurrSeg.Future) > 0 +// @ requires absPkt(ub).PathNotFullyTraversed() // @ requires p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub) // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) // @ ensures reserr == nil ==> p.LastHopLen(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) // @ ensures reserr == nil ==> old(slayers.IsSupportedPkt(ub)) == 
slayers.IsSupportedPkt(ub) @@ -2940,6 +2953,9 @@ func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2 @*/ // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) { + // @ reveal p.EqAbsHopField(absPkt(ub)) + // @ assert let fut := absPkt(ub).CurrSeg.Future in + // @ fut == seq[io.IO_HF]{p.hopField.ToIO_HF()} ++ fut[1:] // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) @@ -2974,20 +2990,16 @@ return processResult{}, serrors.WrapStr("update hop field", err) } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ assert p.DstIsLocalIngressID(ub) - // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) - // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // postcondition of SetHopfield - // @ TemporaryAssumeForIO(absPkt(ub) == old(absPkt(ub))) - // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ assert slayers.ValidPktMetaHdr(ub) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) // @ assert reveal p.LastHopLen(ub) // @ assert p.scionLayer.EqAbsHeader(ub) + // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) // @ ghost var ubLL []byte // @ ghost if &p.scionLayer === p.lastLayer { @@ -3040,11 +3052,11 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) { // @ ensures reserr != nil ==> reserr.ErrorMem() // contracts for IO-spec // @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ requires len(absPkt(ub).CurrSeg.Future) > 0 +// @ requires absPkt(ub).PathNotFullyTraversed() // @ requires p.EqAbsHopField(absPkt(ub)) // @ requires p.EqAbsInfoField(absPkt(ub)) // @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) -// @ ensures reserr == nil ==> len(absPkt(ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> absPkt(ub).PathNotFullyTraversed() // @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(ub)) // @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(ub)) // @ ensures reserr == nil ==> absPkt(ub) == old(absPkt(ub)) @@ -3053,6 +3065,9 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) { // @ absIO_val(respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) { + // @ reveal p.EqAbsHopField(absPkt(ub)) + // @ assert let fut := absPkt(ub).CurrSeg.Future in + // @ fut == seq[io.IO_HF]{p.hopField.ToIO_HF()} ++ fut[1:] // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) @@ -3091,17 +3106,15 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho return processResult{}, serrors.WrapStr("update hop
field", err) } // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(scion.validPktMetaHdr(ubPath) && p.path.GetBase(ubPath).EqAbsHeader(ubPath)) // postcondition of SetHopfield // @ slayers.IsSupportedPktSubslice(ub, slayers.CmnHdrLen) // @ sl.Unslice_Bytes(ub, 0, slayers.CmnHdrLen, R54) // @ sl.CombineAtIndex_Bytes(ub, 0, startP, slayers.CmnHdrLen, R54) // @ p.scionLayer.ValidHeaderOffsetFromSubSliceLemma(ub, startP) // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP) // @ absPktFutureLemma(ub) - // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(ub))) // postcondition of SetHopfield - // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(ub))) + // @ assert reveal p.EqAbsHopField(absPkt(ub)) + // @ assert reveal p.EqAbsInfoField(absPkt(ub)) // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) - // @ TemporaryAssumeForIO(absPkt(ub) == old(absPkt(ub))) // @ ghost var ubLL []byte // @ ghost if &p.scionLayer === p.lastLayer { diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra index b238a4ddd..8aa346ff3 100644 --- a/router/io-spec-abstract-transitions.gobra +++ b/router/io-spec-abstract-transitions.gobra @@ -18,6 +18,7 @@ package router import ( "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers/path/scion" "github.com/scionproto/scion/pkg/slayers" . "verification/utils/definitions" io "verification/io" @@ -26,7 +27,7 @@ import ( ) ghost -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func CurrSegIO_ifs(pkt io.IO_pkt2, dir bool) option[io.IO_ifs] { return let currseg := pkt.CurrSeg in @@ -35,8 +36,8 @@ pure func CurrSegIO_ifs(pkt io.IO_pkt2, dir bool) option[io.IO_ifs] { ghost opaque -requires len(oldPkt.CurrSeg.Future) > 0 -ensures len(newPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() +ensures newPkt.PathNotFullyTraversed() ensures len(newPkt.CurrSeg.Future) == len(oldPkt.CurrSeg.Future) decreases pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs]) (newPkt io.IO_pkt2) { @@ -50,7 +51,7 @@ pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsValidateIngressIDConstraint(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { return let currseg := pkt.CurrSeg in @@ -71,7 +72,7 @@ pure func AbsValidateIngressIDConstraintXover(pkt io.IO_pkt2, ingressID option[i ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs]) bool { return let currseg := pkt.CurrSeg in @@ -81,7 +82,7 @@ pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs ghost opaque requires dp.Valid() -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPlaneSpec) bool { return let currseg := pkt.CurrSeg in @@ -90,7 +91,7 @@ pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPl ghost opaque -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() ensures len(newPkt.CurrSeg.Future) >= 0 decreases pure func AbsProcessEgress(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { @@ -108,7 +109,7 @@ requires oldPkt.LeftSeg != none[io.IO_seg2] requires len(oldPkt.CurrSeg.Future) == 1 requires 
len(get(oldPkt.LeftSeg).Future) > 0 requires len(get(oldPkt.LeftSeg).History) == 0 -ensures len(newPkt.CurrSeg.Future) > 0 +ensures newPkt.PathNotFullyTraversed() ensures newPkt.RightSeg != none[io.IO_seg2] ensures len(get(newPkt.RightSeg).Past) > 0 decreases @@ -124,7 +125,7 @@ pure func AbsDoXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { ghost opaque requires dp.Valid() -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() requires pkt.RightSeg != none[io.IO_seg2] requires len(get(pkt.RightSeg).Past) > 0 decreases @@ -137,7 +138,7 @@ pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec ghost opaque -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func AbsVerifyCurrentMACConstraint(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { return let currseg := pkt.CurrSeg in @@ -154,7 +155,7 @@ ghost requires dp.Valid() requires ingressID != none[io.IO_ifs] requires egressID == none[io.IO_ifs] -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) requires AbsValidateIngressIDConstraint(oldPkt, ingressID) @@ -184,7 +185,7 @@ ghost requires dp.Valid() requires egressID != none[io.IO_ifs] requires get(egressID) in domain(dp.GetNeighborIAs()) -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) @@ -216,7 +217,7 @@ func ExternalEnterOrExitEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], ne ghost requires dp.Valid() requires ingressID != none[io.IO_ifs] -requires len(oldPkt.CurrSeg.Future) > 0 +requires oldPkt.PathNotFullyTraversed() requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) diff --git a/router/io-spec-lemmas.gobra b/router/io-spec-lemmas.gobra index e6733d2ce..ba26a992a 100644 --- a/router/io-spec-lemmas.gobra +++ b/router/io-spec-lemmas.gobra @@ -32,7 +32,7 @@ preserves acc(sl.Bytes(raw, 0, len(raw)), R55) ensures slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw) ==> absIO_val(raw, ingressID).isIO_val_Pkt2 && absIO_val(raw, ingressID).IO_val_Pkt2_2 == absPkt(raw) && - len(absPkt(raw).CurrSeg.Future) > 0 + absPkt(raw).PathNotFullyTraversed() decreases func absIO_valLemma(raw []byte, ingressID uint16) { if(slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)){ @@ -48,7 +48,7 @@ requires acc(sl.Bytes(raw, 0, len(raw)), R56) requires slayers.ValidPktMetaHdr(raw) ensures acc(sl.Bytes(raw, 0, len(raw)), R56) ensures slayers.ValidPktMetaHdr(raw) -ensures len(absPkt(raw).CurrSeg.Future) > 0 +ensures absPkt(raw).PathNotFullyTraversed() decreases func absPktFutureLemma(raw []byte) { reveal slayers.ValidPktMetaHdr(raw) @@ -70,11 +70,11 @@ func absPktFutureLemma(raw []byte) { offset := scion.HopFieldOffset(numINF, prevSegLen, headerOffsetWithMetaLen) pkt := reveal absPkt(raw) assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset, currInfIdx, currHfIdx-prevSegLen, segLen, headerOffsetWithMetaLen) - assert len(pkt.CurrSeg.Future) > 0 + assert pkt.PathNotFullyTraversed() } ghost -requires len(oldPkt.CurrSeg.Future) > 0 +requires 
oldPkt.PathNotFullyTraversed() requires AbsValidateIngressIDConstraint(oldPkt, ingressID) requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) ensures AbsValidateIngressIDConstraint(newPkt, ingressID) @@ -166,7 +166,7 @@ ghost opaque requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func (p *scionPacketProcessor) NoBouncingPkt(pkt io.IO_pkt2) bool { return let currseg := pkt.CurrSeg in @@ -178,7 +178,7 @@ pure func (p *scionPacketProcessor) NoBouncingPkt(pkt io.IO_pkt2) bool { ghost requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) requires (egressID in p.d.getDomExternal()) || p.ingressID != 0 ensures acc(&p.d, R55) && acc(p.d.Mem(), _) @@ -193,7 +193,7 @@ func (p *scionPacketProcessor) EstablishNoBouncingPkt(pkt io.IO_pkt2, egressID u ghost requires acc(&p.d, R55) && acc(p.d.Mem(), _) requires acc(&p.ingressID, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() requires AbsEgressInterfaceConstraint(pkt, path.ifsToIO_ifs(egressID)) requires p.NoBouncingPkt(pkt) requires !(egressID in p.d.getDomExternal()) @@ -369,7 +369,7 @@ func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end ghost opaque requires acc(&p.hopField, R55) -requires len(pkt.CurrSeg.Future) > 0 +requires pkt.PathNotFullyTraversed() decreases pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { return let absHop := p.hopField.ToIO_HF() in diff --git a/verification/io/io_spec_definitions.gobra b/verification/io/io_spec_definitions.gobra index 9f78b8969..e7dc18e96 100644 --- a/verification/io/io_spec_definitions.gobra +++ b/verification/io/io_spec_definitions.gobra @@ -23,18 +23,18 @@ ghost requires len(currseg.Future) > 0 decreases pure func establishGuardTraversedseg(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? - upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: currseg.Past, - Future: currseg.Future, - History: currseg.History, - } + return let uinfo := direction ? + upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: currseg.Past, + Future: currseg.Future, + History: currseg.History, + } } // Establishes the traversed segment for packets that are incremented (external). @@ -42,91 +42,114 @@ ghost requires len(currseg.Future) > 0 decreases pure func establishGuardTraversedsegInc(currseg IO_seg3, direction bool) IO_seg3 { - return let uinfo := direction ? - upd_uinfo(currseg.UInfo, currseg.Future[0]) : - currseg.UInfo in - IO_seg3_ { - AInfo: currseg.AInfo, - UInfo: uinfo, - ConsDir: currseg.ConsDir, - Peer: currseg.Peer, - Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, - Future: currseg.Future[1:], - History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, - } + return let uinfo := direction ? 
+ upd_uinfo(currseg.UInfo, currseg.Future[0]) : + currseg.UInfo in + IO_seg3_ { + AInfo: currseg.AInfo, + UInfo: uinfo, + ConsDir: currseg.ConsDir, + Peer: currseg.Peer, + Past: seq[IO_HF]{currseg.Future[0]} ++ currseg.Past, + Future: currseg.Future[1:], + History: seq[IO_ahi]{currseg.Future[0].Toab()} ++ currseg.History, + } } ghost +requires len(seg.Future) > 0 decreases -pure func (seg IO_seg3) UpdateCurrSeg( info AbsInfoField) IO_seg3 { - return IO_seg3_ { - info.AInfo, - info.UInfo, - info.ConsDir, - info.Peer, - seg.Past, - seg.Future, - seg.History, - } +pure func (seg IO_seg3) UpdateCurrHf(hf IO_HF) IO_seg3 { + return IO_seg3_ { + seg.AInfo, + seg.UInfo, + seg.ConsDir, + seg.Peer, + seg.Past, + seq[IO_HF]{hf} ++ seg.Future[1:], + seg.History, + } +} + +ghost +requires pkt.PathNotFullyTraversed() +decreases +pure func (pkt IO_pkt2) UpdateHopField(hf IO_HF) IO_pkt2 { + return let newCurrSeg := pkt.CurrSeg.UpdateCurrHf(hf) in + IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} +} + +ghost +decreases +pure func (seg IO_seg3) UpdateCurrSeg(info AbsInfoField) IO_seg3 { + return IO_seg3_ { + info.AInfo, + info.UInfo, + info.ConsDir, + info.Peer, + seg.Past, + seg.Future, + seg.History, + } } ghost decreases pure func (pkt IO_pkt2) UpdateInfoField(info AbsInfoField) IO_pkt2 { - return let newCurrSeg := pkt.CurrSeg.UpdateCurrSeg(info) in - IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} + return let newCurrSeg := pkt.CurrSeg.UpdateCurrSeg(info) in + IO_Packet2{newCurrSeg, pkt.LeftSeg, pkt.MidSeg, pkt.RightSeg} } // This type simplifies the infoField, making it easier // to use than the IO_seg3 from the IO-spec. type AbsInfoField adt { - AbsInfoField_ { - AInfo IO_ainfo - UInfo set[IO_msgterm] - ConsDir bool - Peer bool - } + AbsInfoField_ { + AInfo IO_ainfo + UInfo set[IO_msgterm] + ConsDir bool + Peer bool + } } // The segment lengths of a packet are frequently used together. // This type combines them into a single structure to simplify // their specification. type SegLens adt { - SegLens_ { - Seg1Len int - Seg2Len int - Seg3Len int - } + SegLens_ { + Seg1Len int + Seg2Len int + Seg3Len int + } } ghost decreases pure func (s SegLens) Valid() bool { - return s.Seg1Len > 0 && - s.Seg2Len >= 0 && - s.Seg3Len >= 0 + return s.Seg1Len > 0 && + s.Seg2Len >= 0 && + s.Seg3Len >= 0 } ghost decreases pure func CombineSegLens(seg1Len int, seg2Len int, seg3Len int) SegLens { - return SegLens_ { - seg1Len, - seg2Len, - seg3Len, - } + return SegLens_ { + seg1Len, + seg2Len, + seg3Len, + } } ghost decreases pure func (s SegLens) NumInfoFields() int { - return s.Seg3Len > 0 ? 3 : (s.Seg2Len > 0 ? 2 : (s.Seg1Len > 0 ? 1 : 0)) + return s.Seg3Len > 0 ? 3 : (s.Seg2Len > 0 ? 2 : (s.Seg1Len > 0 ? 1 : 0)) } ghost decreases pure func (s SegLens) TotalHops() int { - return s.Seg1Len + s.Seg2Len + s.Seg3Len + return s.Seg1Len + s.Seg2Len + s.Seg3Len } ghost @@ -141,4 +164,10 @@ ensures res <= currHF decreases pure func (s SegLens) LengthOfPrevSeg(currHF int) (res int) { return s.Seg1Len > currHF ? 0 : ((s.Seg1Len + s.Seg2Len) > currHF ? s.Seg1Len : s.Seg1Len + s.Seg2Len) -} \ No newline at end of file +} + +ghost +decreases +pure func (pkt IO_pkt2) PathNotFullyTraversed() bool { + return len(pkt.CurrSeg.Future) > 0 +}
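+
+// Illustrative sketch (hypothetical client, not part of this change): `UpdateHopField`
+// may only be applied while the path is not fully traversed, and it preserves this
+// property, since the updated Future sequence seq[IO_HF]{hf} ++ seg.Future[1:] is
+// non-empty:
+//
+//   ghost
+//   requires pkt.PathNotFullyTraversed()
+//   ensures  res.PathNotFullyTraversed()
+//   decreases
+//   pure func replaceCurrHop(pkt IO_pkt2, hf IO_HF) (res IO_pkt2) {
+//       return pkt.UpdateHopField(hf)
+//   }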