diff --git a/router/dataplane.go b/router/dataplane.go index 4ffcef279..d7e84f3cf 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -137,9 +137,8 @@ type BatchConn interface { // @ ensures io.token(old(MultiReadBioNext(place, prophecyM))) // @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, ifsToIO_ifs(ingressID))) // @ ensures err == nil ==> - // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> - // @ unfolding acc(msgs[i].Mem(), R56) in absIO_val(dp, msgs[i].Buffers[0], ingressID) == - // @ old(MultiReadBioIO_val(place, n)[i]) + // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i]) // TODO (Markus): uint16 or option[io.IO_ifs] for ingress ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place, ghost dp io.DataPlaneSpec @*/) (n int, err error) // @ requires acc(addr.Mem(), _) @@ -152,10 +151,19 @@ type BatchConn interface { // (VerifiedSCION) opted for less reusable spec for WriteBatch for // performance reasons. // @ requires len(msgs) == 1 - // @ preserves acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // @ requires acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // @ ensures acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() // @ ensures err == nil ==> 0 <= n && n <= len(msgs) // @ ensures err != nil ==> err.ErrorMem() - WriteBatch(msgs underlayconn.Messages, flags int) (n int, err error) + // contracts for IO-spec + // @ requires dp.Valid() + // @ requires MsgToAbsVal(dp, &msgs[0], egressID) == ioAbsPkts + // @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts) + // @ ensures dp.Valid() + // (VerifiedSCION) the permission to the protocol must always be returned, otherwise the router could not continue + // after failing to send a packet. 
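+ // A minimal sketch of the intended calling pattern (mirroring the call site in Run below;
+ // conn, msgs, egressID, place and ioAbsPkts stand for the corresponding locals there):
+ //
+ //   // @ ghost tN := io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts)
+ //   _, err = conn.WriteBatch(msgs, syscall.MSG_DONTWAIT /*@, egressID, place, ioAbsPkts, dp @*/)
+ //   // @ ghost *ioSharedArg.Place = tN
+ //
+ // i.e. the ghost place is advanced to the successor place regardless of whether the write itself succeeded.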
+ // @ ensures io.token(old(io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts))) + WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val, ghost dp io.DataPlaneSpec @*/) (n int, err error) // @ requires Mem() // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -732,6 +740,7 @@ func (d *DataPlane) AddNextHopBFD(ifID uint16, src, dst *net.UDPAddr, cfg contro // @ requires dp.Valid() // @ requires d.DpAgreesWithSpec(dp) // @ requires io.token(place) && dp.dp3s_iospec_ordered(state, place) +// @ #backend[moreJoins()] func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost state io.IO_dp3s_state_local, ghost dp io.DataPlaneSpec @*/) error { // @ share d, ctx d.mtx.Lock() @@ -791,8 +800,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ requires let d := *dPtr in // @ d.DpAgreesWithSpec(dp) // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ #backend[moreJoins()] func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { - // @ ghost ioIngressID := ifsToIO_ifs(ingressID) d := *dPtr msgs := conn.NewReadMessages(inputBatchCnt) // @ requires forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> @@ -855,8 +864,11 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant ingressID in d.getDomForwardingMetrics() // @ invariant acc(rd.Mem(), _) // @ invariant processor.sInit() && processor.sInitD() === d + // @ invariant processor.getIngressID() == ingressID // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() for d.running { + // @ ghost ioIngressID := ifsToIO_ifs(ingressID) // Multi recv event // @ ghost ioLock.Lock() // @ unfold SharedInv!< dp, ioSharedArg !>() @@ -864,14 +876,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ ghost numberOfReceivedPacketsProphecy := AllocProphecy() // @ ExtractMultiReadBio(dp, t, numberOfReceivedPacketsProphecy, s) // @ MultiUpdateElemWitness(t, numberOfReceivedPacketsProphecy, ioIngressID, s, ioSharedArg) - // @ ghost ioValSeq := MultiReadBioIO_val(t,numberOfReceivedPacketsProphecy) + // @ ghost ioValSeq := MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy) // @ ghost sN := MultiReadBioUpd(t, numberOfReceivedPacketsProphecy, s) // @ ghost tN := MultiReadBioNext(t, numberOfReceivedPacketsProphecy) // @ assert dp.dp3s_iospec_ordered(sN, tN) + // @ BeforeReadBatch: pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t , dp @*/) + // @ assert old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)) == ioValSeq + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ ioValSeq[i] == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] // @ ghost *ioSharedArg.State = sN // @ ghost *ioSharedArg.Place = tN + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) // @ MultiElemWitnessConv(ioSharedArg.IBufY, 
ioIngressID, ioValSeq) // @ fold SharedInv!< dp, ioSharedArg !>() // @ ioLock.Unlock() @@ -893,6 +915,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ !msgs[i].HasWildcardPermAddr() // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) + // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] // (VerifiedSCION) using regular for loop instead of range loop to avoid unnecessary // complications with permissions @@ -915,6 +939,15 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) // @ invariant processor.sInit() && processor.sInitD() === d + // @ invariant processor.getIngressID() == ingressID + // contracts for IO-spec + // @ invariant pkts <= len(ioValSeq) + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() + // @ invariant ioIngressID == ifsToIO_ifs(ingressID) + // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ invariant forall i int :: { &msgs[i] } i0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] + // @ invariant MultiElemWitnessWithIndex(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) for i0 := 0; i0 < pkts; i0++ { // @ assert &msgs[:pkts][i0] == &msgs[i0] // @ preserves 0 <= i0 && i0 < pkts && pkts <= len(msgs) @@ -946,12 +979,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ assert p.Buffers === m.Buffers // @ assert acc(&p.Buffers[0]) // @ assert p.N <= len(p.Buffers[0]) - // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, writePerm) + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) tmpBuf := p.Buffers[0][:p.N] + // @ ghost absPktTmpBuf := absIO_val(dp, tmpBuf, ingressID) + // @ ghost absPktBuf0 := absIO_val(dp, msgs[i0].Buffers[0], ingressID) + // @ assert msgs[i0] === p + // @ absIO_valWidenLemma(dp, p.Buffers[0], ingressID, p.N) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf === absPktBuf0 + // @ MultiElemWitnessStep(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) + // @ assert ioValSeq[i0].isIO_val_Pkt2 ==> + // @ ElemWitness(ioSharedArg.IBufY, ioIngressID, ioValSeq[i0].IO_val_Pkt2_2) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf == ioValSeq[i0] + // @ assert ifsToIO_ifs(processor.getIngressID()) == ioIngressID + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, p.N) // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)) - result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp@*/) + result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp @*/) // @ fold scmpErr.Mem() + switch { case err == nil: // @ unfold scmpErr.Mem() @@ -999,6 +1044,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta continue } + // (VerifiedSCION) we currently have this assumption because we cannot think of a sound way to capture + // the behaviour of errors.As(...) in our specifications. Nonetheless, we checked extensively that, when + // processPkt does not return an error or returns an scmpError (and thus errors.As(err, &scmpErr) succeeds), + // result.OutPkt is always non-nil. 
For the other kinds of errors, the result is nil, but that branch is killed + // before this point. + // @ assume result.OutPkt != nil + // Write to OutConn; drop the packet if this would block. // Use WriteBatch because it's the only available function that // supports MSG_DONTWAIT. @@ -1012,8 +1064,26 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta writeMsgs[0].Addr = result.OutAddr } // @ sl.NilAcc_Bytes() + // @ assert absIO_val(dp, result.OutPkt, result.EgressID) == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID) + // @ assert result.OutPkt != nil ==> newAbsPkt == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID) // @ fold acc(writeMsgs[0].Mem(), R50) - _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT) + + // @ ghost ioLock.Lock() + // @ unfold SharedInv!< dp, ioSharedArg !>() + // @ ghost t, s := *ioSharedArg.Place, *ioSharedArg.State + // @ ghost if(newAbsPkt.isIO_val_Pkt2) { + // @ ApplyElemWitness(s.obuf, ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) + // @ assert newAbsPkt.IO_val_Pkt2_2 in AsSet(s.obuf[newAbsPkt.IO_val_Pkt2_1]) + // @ assert dp.dp3s_iospec_bio3s_send_guard(s, t, newAbsPkt) + // @ } else { assert newAbsPkt.isIO_val_Unsupported } + // @ unfold dp.dp3s_iospec_ordered(s, t) + // @ unfold dp.dp3s_iospec_bio3s_send(s, t) + // @ io.TriggerBodyIoSend(newAbsPkt) + // @ ghost tN := io.dp3s_iospec_bio3s_send_T(t, newAbsPkt) + _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT /*@, result.EgressID, t, newAbsPkt, dp @*/) + // @ ghost *ioSharedArg.Place = tN + // @ fold SharedInv!< dp, ioSharedArg !>() + // @ ghost ioLock.Unlock() // @ unfold acc(writeMsgs[0].Mem(), R50) // @ ghost if addrAliasesPkt && result.OutAddr != nil { // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) @@ -1280,7 +1350,7 @@ type processResult struct { } // @ requires acc(d.Mem(), _) && d.getMacFactory() != nil -// @ ensures res.sInit() && res.sInitD() == d +// @ ensures res.sInit() && res.sInitD() == d && res.getIngressID() == ingressID // @ decreases func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcessor) { var verScionTmp gopacket.SerializeBuffer @@ -1309,6 +1379,7 @@ func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcess // @ preserves p.sInit() // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == old(p.getIngressID()) // @ ensures p.sInitRawPkt() == nil // @ ensures p.sInitPath() == nil // @ ensures p.sInitHopField() == path.HopField{} @@ -1341,10 +1412,11 @@ func (p *scionPacketProcessor) reset() (err error) { // @ d.WellConfigured() && // @ d.getValSvc() != nil && // @ d.getValForwardingMetrics() != nil && -// @ d.DpAgreesWithSpec(dp) +// @ d.DpAgreesWithSpec(dp) // @ ensures p.sInit() // @ ensures acc(p.sInitD().Mem(), _) // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == old(p.getIngressID()) // @ ensures p.sInitD().validResult(respr, addrAliasesPkt) // @ ensures acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), 1 - R15) // @ ensures addrAliasesPkt ==> ( @@ -1358,13 +1430,15 @@ func (p *scionPacketProcessor) reset() (err error) { // @ requires dp.Valid() // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; // @ requires let absPkt := absIO_val(dp, rawPkt, p.getIngressID()) in -// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2) -// @ ensures 
reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> -// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) -// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> respr.OutPkt != nil && -// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) -// TODO: On a first step, we will prove that whenever we have a valid scion packet in processSCION, -// the correct "next packet" is computed +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2) +// @ ensures dp.Valid() +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures reserr != nil && respr.OutPkt != nil ==> newAbsPkt.isIO_val_Unsupported +// @ ensures respr.OutPkt != nil ==> newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) func (p *scionPacketProcessor) processPkt(rawPkt []byte, srcAddr *net.UDPAddr /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { @@ -1650,8 +1724,10 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) // @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> // @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) -// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> respr.OutPkt != nil && +// @ ensures respr.OutPkt != nil ==> // @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ newAbsPkt.isIO_val_Unsupported func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { var ok bool @@ -2819,7 +2895,6 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, } // verify the new block if r, err := p.verifyCurrentMAC(); err != nil { - // fold acc(p.scionLayer.Mem(ub), R3) // @ p.scionLayer.DowngradePerm(ub) return r, serrors.WithCtx(err, "info", "after xover") /*@, false, io.IO_val_Unit{} @*/ } diff --git a/router/dataplane_concurrency_model.gobra b/router/dataplane_concurrency_model.gobra index 10dfc049d..79e273864 100644 --- a/router/dataplane_concurrency_model.gobra +++ b/router/dataplane_concurrency_model.gobra @@ -99,6 +99,7 @@ pure func MultiReadBioCorrectIfs( } ghost +opaque requires 0 <= expectedPkts && MultiReadBio(t, expectedPkts) ensures len(res) == expectedPkts decreases expectedPkts @@ -159,8 +160,10 @@ requires ElemAuth(s.ibuf, y.IBufY) && ElemAuth(s.obuf, y.OBufY) ensures MultiReadBio(t, n) ensures MultiReadBioUpd(t, n, s) == old(MultiReadBioUpd(t, n, s)) ensures MultiReadBioNext(t, n) == old(MultiReadBioNext(t, n)) -ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY) && ElemAuth(MultiReadBioUpd(t, n, s).obuf, y.OBufY) -ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==> MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n)) +ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY) +ensures ElemAuth(MultiReadBioUpd(t, n, 
s).obuf, y.OBufY)
+ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==>
+	MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n))
 decreases n
 func MultiUpdateElemWitness(
 	t io.Place,
@@ -180,6 +183,7 @@ func MultiUpdateElemWitness(
 	}
 	if 0 <= n && MultiReadBioCorrectIfs(t, n, k) {
+		reveal MultiReadBioIO_val(t, n)
 		fold MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n))
 	}
 }
@@ -215,6 +219,16 @@ func MultiElemWitnessConv(y ElemRA,k Key, es seq[io.IO_val]) {
 	multiElemWitnessConvAux(y,k,es,0)
 }
+ghost
+requires 0 <= currIdx && currIdx < len(es)
+requires MultiElemWitnessWithIndex(y, k, es, currIdx)
+ensures es[currIdx].isIO_val_Pkt2 ==> ElemWitness(y, k, es[currIdx].IO_val_Pkt2_2)
+ensures MultiElemWitnessWithIndex(y, k, es, currIdx + 1)
+decreases
+func MultiElemWitnessStep(y ElemRA, k Key, es seq[io.IO_val], currIdx int) {
+	unfold MultiElemWitnessWithIndex(y, k, es, currIdx)
+}
+
 ghost
 requires i >= 0
 requires MultiElemWitness(y,k,es[i:])
diff --git a/router/io-spec.gobra b/router/io-spec.gobra
index 763b5a43d..a67c38e58 100644
--- a/router/io-spec.gobra
+++ b/router/io-spec.gobra
@@ -17,12 +17,14 @@ package router
 import (
-	sl "verification/utils/slices"
-	"verification/io"
-	"verification/dependencies/encoding/binary"
 	"github.com/scionproto/scion/pkg/slayers/path"
 	"github.com/scionproto/scion/pkg/slayers/path/scion"
 	"github.com/scionproto/scion/private/topology"
+	"golang.org/x/net/ipv4"
+
+	"verification/dependencies/encoding/binary"
+	"verification/io"
+	sl "verification/utils/slices"
 	. "verification/utils/definitions"
 )
@@ -652,3 +654,13 @@ pure func (d *DataPlane) DpAgreesWithSpec(dp io.DataPlaneSpec) bool {
 	d.dpSpecWellConfiguredNeighborIAs(dp) &&
 	d.dpSpecWellConfiguredLinkTypes(dp)
 }
+
+ghost
+requires dp.Valid()
+requires acc(msg.Mem(), R50)
+decreases
+pure func MsgToAbsVal(dp io.DataPlaneSpec, msg *ipv4.Message, ingressID uint16) (res io.IO_val) {
+	return unfolding acc(msg.Mem(), R50) in
+		absIO_val(dp, msg.Buffers[0], ingressID)
+}
+
diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra
new file mode 100644
index 000000000..e000c6a11
--- /dev/null
+++ b/router/widen-lemma.gobra
@@ -0,0 +1,970 @@
+// Copyright 2022 ETH Zurich
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package router
+
+import (
+	sl "verification/utils/slices"
+	"verification/io"
+	. "verification/utils/definitions"
+	"verification/dependencies/encoding/binary"
+	"github.com/scionproto/scion/pkg/slayers/path"
+	"github.com/scionproto/scion/pkg/slayers/path/scion"
+)
+
+// Some things in this file can be simplified. Nonetheless, the important definition here
+// is absIO_valWidenLemma. Everything else can be seen as an implementation detail.
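+// Informally: if the prefix raw[:length] already abstracts to a packet (isIO_val_Pkt2), then
+// widening the slice to the full buffer raw yields the same abstract IO value. A minimal usage
+// sketch, mirroring the call site in dataplane.go (where tmpBuf := p.Buffers[0][:p.N]):
+//
+//   absIO_valWidenLemma(dp, p.Buffers[0], ingressID, p.N)
+//   assert absIO_val(dp, tmpBuf, ingressID).isIO_val_Pkt2 ==>
+//       absIO_val(dp, tmpBuf, ingressID) == absIO_val(dp, p.Buffers[0], ingressID)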
+ +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) +preserves dp.Valid() +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) +ensures absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) +decreases +func absIO_valWidenLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16, length int) { + var ret1 io.IO_val + var ret2 io.IO_val + + if (validPktMetaHdr(raw[:length]) && absPkt(dp, raw[:length]) != none[io.IO_pkt2]) { + validPktMetaHdrWidenLemma(raw, length) + assert validPktMetaHdr(raw) + absPktWidenLemma(dp, raw, length) + assert absPkt(dp, raw) != none[io.IO_pkt2] + + ret1 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw))}) + ret2 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw[:length]))}) + assert ret1 == reveal absIO_val(dp, raw, ingressID) + assert ret2 == reveal absIO_val(dp, raw[:length], ingressID) + assert ret1 == ret2 + assert absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) + } else { + assert !(reveal absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2) + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +requires validPktMetaHdr(raw[:length]) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures validPktMetaHdr(raw) +decreases +func validPktMetaHdrWidenLemma(raw []byte, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + reveal validPktMetaHdr(raw[:length]) + ret1 := reveal validPktMetaHdr(raw) + ret2 := reveal validPktMetaHdr(raw[:length]) + assert ret1 == ret2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) +requires validPktMetaHdr(raw) +requires validPktMetaHdr(raw[:length]) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) +ensures validPktMetaHdr(raw) +ensures validPktMetaHdr(raw[:length]) +ensures absPkt(dp, raw) == absPkt(dp, raw[:length]) +decreases +func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { + + // declarations + var last1 io.IO_as + var last2 io.IO_as + var first1 io.IO_as + var first2 io.IO_as + var leftAsidSeq1 option[seq[io.IO_as]] + var leftAsidSeq2 option[seq[io.IO_as]] + var rightAsidSeq1 option[seq[io.IO_as]] + var rightAsidSeq2 option[seq[io.IO_as]] + var midAsidSeq1 option[seq[io.IO_as]] + var midAsidSeq2 option[seq[io.IO_as]] + var midAsid1 option[io.IO_as] + var midAsid2 option[io.IO_as] + var ret1 option[io.IO_pkt2] + var ret2 option[io.IO_pkt2] + var lm bool + + // abspkt step by step + _ := reveal validPktMetaHdr(raw) + _ := reveal validPktMetaHdr(raw[:length]) + hdr1 := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in 
binary.BigEndian.Uint32(raw[0:4]) + hdr2 := unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) + assert unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) == unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) + assert hdr1 == hdr2 + + metaHdr1 := scion.DecodedFrom(hdr1) + metaHdr2 := scion.DecodedFrom(hdr2) + assert metaHdr1 == metaHdr2 + + currINFIdx1 := int(metaHdr1.CurrINF) + currINFIdx2 := int(metaHdr2.CurrINF) + assert currINFIdx1 == currINFIdx2 + + currHFIdx1 := int(metaHdr1.CurrHF) + currHFIdx2 := int(metaHdr2.CurrHF) + assert currHFIdx1 == currHFIdx2 + + seg1Len1 := int(metaHdr1.SegLen[0]) + seg1Len2 := int(metaHdr2.SegLen[0]) + assert seg1Len1 == seg1Len2 + + seg2Len1 := int(metaHdr1.SegLen[1]) + seg2Len2 := int(metaHdr2.SegLen[1]) + assert seg2Len1 == seg2Len2 + + seg3Len1 := int(metaHdr1.SegLen[2]) + seg3Len2 := int(metaHdr2.SegLen[2]) + assert seg3Len1 == seg3Len2 + + segLen1 := lengthOfCurrSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + segLen2 := lengthOfCurrSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + assert segLen1 == segLen2 + + prevSegLen1 := lengthOfPrevSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + prevSegLen2 := lengthOfPrevSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + assert prevSegLen1 == prevSegLen2 + + numINF1 := numInfoFields(seg1Len1, seg2Len1, seg3Len1) + numINF2 := numInfoFields(seg1Len2, seg2Len2, seg3Len2) + assert numINF1 == numINF2 + + offset1 := hopFieldOffset(numINF1, 0) + offset2 := hopFieldOffset(numINF2, 0) + assert offset1 == offset2 + + consDir1 := path.ConsDir(raw, currINFIdx1) + consDir2 := path.ConsDir(raw[:length], currINFIdx2) + consDirWidenLemma(raw, length, currINFIdx1) + assert consDir1 == consDir2 + + asidForCurrSegWidenLemma(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid(), length) + currAsidSeq2 := asidForCurrSeg(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid()) + currAsidSeq1 := asidForCurrSeg(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2+segLen2, prevSegLen2, consDir2, dp.Asid()) + assert currAsidSeq1 == currAsidSeq2 + + if (currAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == reveal absPkt(dp, raw[:length]) + } else { + + last1 = get(currAsidSeq1)[segLen1-1] + last2 = get(currAsidSeq2)[segLen1-1] + assert last1 == last2 + + first1 = get(currAsidSeq1)[0] + first2 = get(currAsidSeq2)[0] + assert first1 == first2 + + asidsForLeftSegWidenLemma(dp, raw, numINF1, currINFIdx1+1, seg1Len1, seg2Len1, seg3Len1, last1, length) + leftAsidSeq1 = asidsForLeftSeg(dp, raw, numINF1, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, last1) + leftAsidSeq2 = asidsForLeftSeg(dp, raw[:length], numINF2, currINFIdx2 + 1, seg1Len2, seg2Len2, seg3Len2, last2) + assert leftAsidSeq1 == leftAsidSeq2 + + asidsForRightSegWidenLemma(dp, raw, numINF1, currINFIdx1-1, seg1Len1, seg2Len1, seg3Len1, first1, length) + rightAsidSeq1 = asidsForRightSeg(dp, raw, numINF1, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, first1) + rightAsidSeq2 = asidsForRightSeg(dp, raw[:length], numINF2, currINFIdx2 - 1, seg1Len2, seg2Len2, seg3Len2, first2) + assert rightAsidSeq1 == rightAsidSeq2 + + if (leftAsidSeq1 == none[seq[io.IO_as]] || rightAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == 
reveal absPkt(dp, raw[:length]) + } else { + assert leftAsidSeq2 != none[seq[io.IO_as]] && rightAsidSeq2 != none[seq[io.IO_as]] + + midAsid1 = ((currINFIdx1 == 0 && seg2Len1 > 0 && seg3Len1 > 0) ? some(get(leftAsidSeq1)[len(get(leftAsidSeq1))-1]) : (currINFIdx1 == 2 && seg2Len1 > 0) ? some(get(rightAsidSeq1)[0]) : none[io.IO_as]) + midAsid2 = ((currINFIdx2 == 0 && seg2Len2 > 0 && seg3Len2 > 0) ? some(get(leftAsidSeq2)[len(get(leftAsidSeq2))-1]) : (currINFIdx2 == 2 && seg2Len2 > 0) ? some(get(rightAsidSeq2)[0]) : none[io.IO_as]) + assert midAsid1 == midAsid2 + + asidsForMidSegWidenLemma(dp, raw, numINF1, currINFIdx1+2, seg1Len1, seg2Len1, seg3Len1, midAsid1, length) + midAsidSeq1 = asidsForMidSeg(dp, raw, numINF1, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, midAsid1) + midAsidSeq2 = asidsForMidSeg(dp, raw[:length], numINF2, currINFIdx2 + 2, seg1Len2, seg2Len2, seg3Len2, midAsid2) + assert midAsidSeq1 == midAsidSeq2 + if (midAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == reveal absPkt(dp, raw[:length]) + } else { + currSegWidenLemma(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1), length) + leftSegWidenLemma(raw, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, get(leftAsidSeq1), length) + midSegWidenLemma(raw, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, get(midAsidSeq1), length) + rightSegWidenLemma(raw, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, get(rightAsidSeq1), length) + ret1 = some(io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1)), + LeftSeg : leftSeg(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, get(leftAsidSeq1)), + MidSeg : midSeg(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, get(midAsidSeq1)), + RightSeg : rightSeg(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, get(rightAsidSeq1)), + })) + ret2 = some(io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg(raw[:length], offset2+prevSegLen2, currINFIdx2, currHFIdx2-prevSegLen2, get(currAsidSeq2)), + LeftSeg : leftSeg(raw[:length], currINFIdx2 + 1, seg1Len2, seg2Len2 , seg3Len2, get(leftAsidSeq2)), + MidSeg : midSeg(raw[:length], currINFIdx2 + 2, seg1Len2, seg2Len2 , seg3Len2, get(midAsidSeq2)), + RightSeg : rightSeg(raw[:length], currINFIdx2 - 1, seg1Len2, seg2Len2 , seg3Len2, get(rightAsidSeq2)), + })) + reveal absPkt(dp, raw) + reveal absPkt(dp, raw[:length]) + assert ret1 == absPkt(dp, raw) + assert ret2 == absPkt(dp, raw[:length]) + assert ret1 == ret2 + } + } + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +requires 0 <= currINFIdx +requires path.InfoFieldOffset(currINFIdx) < length +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +ensures path.ConsDir(raw, currINFIdx) == path.ConsDir(raw[:length], currINFIdx) +decreases +func consDirWidenLemma(raw []byte, length int, currINFIdx int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + assert &raw[path.InfoFieldOffset(currINFIdx)] == &raw[:length][path.InfoFieldOffset(currINFIdx)] + assert raw[path.InfoFieldOffset(currINFIdx)] == raw[:length][path.InfoFieldOffset(currINFIdx)] + assert (raw[path.InfoFieldOffset(currINFIdx)] & 0x1 == 0x1) == (raw[:length][path.InfoFieldOffset(currINFIdx)] & 
0x1 == 0x1) + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires 1 <= numINF1 +requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 +requires currHFIdx1 < segLen1 +requires hopFieldOffset(numINF1, segLen1) <= length +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) == + asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) +decreases +func asidForCurrSegWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + currHFIdx1 int, + segLen1 int, + prevSegLen1 int, + consDir1 bool, + asid1 io.IO_as, + length int) { + + var ret1 option[seq[io.IO_as]] + var ret2 option[seq[io.IO_as]] + var left1 option[seq[io.IO_as]] + var left2 option[seq[io.IO_as]] + var right1 option[seq[io.IO_as]] + var right2 option[seq[io.IO_as]] + + + if (segLen1 == 0) { + assert segLen1 == 0 + ret1 = some(seq[io.IO_as]{}) + ret2 = some(seq[io.IO_as]{}) + assert ret1 == asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } else { + asidsBeforeWidenLemma(dp, raw, numINF1, numINF1, currHFIdx1, currHFIdx1, prevSegLen1, prevSegLen1, consDir1, consDir1, asid1, asid1, length) + left1 = asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + left2 = asidsBefore(dp, raw[:length], numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert left1 == left2 + newP := (R52 + R53)/2 + asidsAfterWidenLemma(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1, length, newP) + right1 = asidsAfter(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1) + right2 = asidsAfter(dp, raw[:length], numINF1, currHFIdx1, segLen1, consDir1, asid1) + assert right1 == right2 + if (left1 == none[seq[io.IO_as]] || right1 == none[seq[io.IO_as]]) { + assert (left2 == none[seq[io.IO_as]] || right2 == none[seq[io.IO_as]]) + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } else { + assert (left2 != none[seq[io.IO_as]] && right2 != none[seq[io.IO_as]]) + ret1 = some(get(left1) ++ get(right1)[1:]) + ret2 = some(get(left2) ++ get(right2)[1:]) + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } + } + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 +} + +ghost +requires 1 <= numINF1 +requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 +requires length <= len(raw) +requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length 
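+// (The bound above guarantees that the hop field read via asidFromIfs for currHFIdx1 still lies
+// within the prefix raw[:length].)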
+requires dp.Valid() +requires consDir1 == consDir2 +requires prevSegLen1 == prevSegLen2 +requires currHFIdx1 == currHFIdx2 +requires numINF1 == numINF2 +requires asid1 == asid2 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +ensures forall i int :: { &raw[i] } 0 <= i && i < len(raw) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) == (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) +ensures forall i int :: { &raw[:length][i] } 0 <= i && i < len(raw[:length]) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) == (unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) +ensures asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) == + asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) +decreases currHFIdx1 - prevSegLen1 +func asidsBeforeWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + numINF2 int, + currHFIdx1 int, + currHFIdx2 int, + prevSegLen1 int, + prevSegLen2 int, + consDir1 bool, + consDir2 bool, + asid1 io.IO_as, + asid2 io.IO_as, + length int) { + + var ret1 option[seq[io.IO_as]] + var ret2 option[seq[io.IO_as]] + var nextAsid1 option[io.IO_as] + var nextAsid2 option[io.IO_as] + var nextAsidSeq1 option[seq[io.IO_as]] + var nextAsidSeq2 option[seq[io.IO_as]] + + if (currHFIdx1 == prevSegLen1) { + assert currHFIdx2 == prevSegLen2 + ret1 = some(seq[io.IO_as]{asid1}) + ret2 = some(seq[io.IO_as]{asid2}) + assert ret1 == ret2 + } else { + assert currHFIdx2 != prevSegLen2 + nextAsid1 = asidFromIfs(dp, raw, numINF1, currHFIdx1, !consDir1, asid1) + nextAsid2 = asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, !consDir2, asid2) + asidFromIfsWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1, currHFIdx2, !consDir1, !consDir2, asid1, asid2, length) + assert nextAsid1 == nextAsid2 + if (nextAsid1 == none[io.IO_as]) { + assert nextAsid2 == none[io.IO_as] + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } else { + assert nextAsid2 != none[io.IO_as] + asidsBeforeWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1-1, currHFIdx2-1, prevSegLen1, prevSegLen2, consDir1, consDir2, get(nextAsid1), get(nextAsid2), length) + nextAsidSeq1 = asidsBefore(dp, raw, numINF1, currHFIdx1-1, prevSegLen1, consDir1, get(nextAsid1)) + nextAsidSeq2 = asidsBefore(dp, raw[:length], numINF2, currHFIdx2-1, prevSegLen2, consDir2, get(nextAsid2)) + assert nextAsidSeq1 == nextAsidSeq2 + if (nextAsidSeq1 == none[seq[io.IO_as]]) { + assert nextAsidSeq2 == none[seq[io.IO_as]] + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } else { + ret1 = some(get(nextAsidSeq1) ++ seq[io.IO_as]{asid1}) + ret2 = some(get(nextAsidSeq2) ++ seq[io.IO_as]{asid2}) + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, 
raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } + } + } +} + +ghost +requires 1 <= numINF1 +requires 0 <= currHFIdx1 +requires numINF1 == numINF2 +requires currHFIdx1 == currHFIdx2 +requires consDir1 == consDir2 +requires asid1 == asid2 +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) == + asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) +decreases +func asidFromIfsWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + numINF2 int, + currHFIdx1 int, + currHFIdx2 int, + consDir1 bool, + consDir2 bool, + asid1 io.IO_as, + asid2 io.IO_as, + length int) { + var ret1 option[io.IO_as] + var ret2 option[io.IO_as] + + idx1 := hopFieldOffset(numINF1, currHFIdx1) + idx2 := hopFieldOffset(numINF2, currHFIdx1) + assert idx1 == idx2 + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + assert forall i int :: { &raw[idx1+2+i] } { &raw[idx1+2:idx1+4][i] } 0 <= i && i < 2 ==> + &raw[idx1+2+i] == &raw[idx1+2:idx1+4][i] + assert forall i int :: { &raw[:length][idx2+2+i] } { &raw[:length][idx2+2:idx2+4][i] } 0 <= i && i < 2 ==> + &raw[:length][idx2+2+i] == &raw[:length][idx2+2:idx2+4][i] + assert forall i int :: { &raw[idx1+4+i] } { &raw[idx1+4:idx1+6][i] } 0 <= i && i < 2 ==> + &raw[idx1+4+i] == &raw[idx1+4:idx1+6][i] + assert forall i int :: { &raw[:length][idx2+4+i] } { &raw[idx2+4:idx2+6][i] } 0 <= i && i < 2 ==> + &raw[:length][idx2+4+i] == &raw[:length][idx2+4:idx2+6][i] + ifs1 := consDir1 ? binary.BigEndian.Uint16(raw[idx1+4:idx1+6]) : binary.BigEndian.Uint16(raw[idx1+2:idx1+4]) + ifs2 := consDir2 ? 
binary.BigEndian.Uint16(raw[:length][idx2+4:idx2+6]) : binary.BigEndian.Uint16(raw[:length][idx2+2:idx2+4]) + assert ifs1 == ifs2 + asIfPair1 := io.AsIfsPair{asid1, io.IO_ifs(ifs1)} + asIfPair2 := io.AsIfsPair{asid2, io.IO_ifs(ifs2)} + assert asIfPair1 == asIfPair2 + if (asIfPair1 in domain(dp.GetLinks())) { + assert asIfPair2 in domain(dp.GetLinks()) + ret1 = some(dp.Lookup(asIfPair1).asid) + ret2 = some(dp.Lookup(asIfPair2).asid) + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) + } else { + assert !(asIfPair2 in domain(dp.GetLinks())) + ret1 = none[io.IO_as] + ret2 = none[io.IO_as] + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) + } + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) +} + +// --- The file has been simplified past this point + +ghost +requires R53 < p +requires 1 <= numINF +requires 0 <= currHFIdx && currHFIdx < segLen +requires length <= len(raw) +requires hopFieldOffset(numINF, segLen) <= length +requires dp.Valid() +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) == + asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) +decreases segLen - currHFIdx + 1 +func asidsAfterWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currHFIdx int, segLen int, consDir bool, asid io.IO_as, length int, p perm) { + if currHFIdx != segLen - 1 { + nextAsid1 := asidFromIfs(dp, raw, numINF, currHFIdx, consDir, asid) + nextAsid2 := asidFromIfs(dp, raw[:length], numINF, currHFIdx, consDir, asid) + asidFromIfsWidenLemma(dp, raw, numINF, numINF, currHFIdx, currHFIdx, consDir, consDir, asid, asid, length) + assert nextAsid1 == nextAsid2 + if nextAsid1 == none[io.IO_as] { + ret := none[seq[io.IO_as]] + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } else { + newP := (p + R53)/2 + asidsAfterWidenLemma(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1), length, newP) + nextAsidSeq1 := asidsAfter(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1)) + nextAsidSeq2 := asidsAfter(dp, raw[:length], numINF, currHFIdx+1, segLen, consDir, get(nextAsid2)) + assert nextAsidSeq1 == nextAsidSeq2 + if nextAsidSeq1 == none[seq[io.IO_as]] { + ret := none[seq[io.IO_as]] + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } else { + ret := some(seq[io.IO_as]{asid} ++ get(nextAsidSeq1)) + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } + } + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + 
seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires 1 <= currINFIdx && currINFIdx < 4 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForLeftSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as, length int) { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + + if currINFIdx == 1 && seg2Len > 0 { + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir2, asid) + assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir2, asid) + assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires -1 <= currINFIdx && currINFIdx < 2 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForRightSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int,seg3Len int, asid io.IO_as, length int) { + if currINFIdx == 1 && seg2Len > 0 { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len-1, 
seg1Len+seg2Len, seg1Len, consDir2, asid) + assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 0 { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, asid) + assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires 2 <= currINFIdx && currINFIdx < 5 +requires (currINFIdx == 4 && seg2Len > 0) ==> asid != none[io.IO_as] +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> asid != none[io.IO_as] +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForMidSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid option[io.IO_as], length int) { + if currINFIdx == 4 && seg2Len > 0 { + consDir1 := path.ConsDir(raw, 1) + consDir2 := path.ConsDir(raw[:length], 1) + consDirWidenLemma(raw, length, 1) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid), length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid)) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, get(asid)) + assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + consDir1 := path.ConsDir(raw, 2) + consDir2 := path.ConsDir(raw[:length], 2) + consDirWidenLemma(raw, length, 2) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, get(asid), length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir1, get(asid)) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir2, get(asid)) + assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, 
currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires path.InfoFieldOffset(currINFIdx) + path.InfoLen <= offset +requires 0 < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires 0 <= currINFIdx && currINFIdx < 3 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures currSeg(raw, offset, currINFIdx, currHFIdx, asid) == + currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid) +decreases +func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, asid seq[io.IO_as], length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) + + ainfo1 := timestamp(raw, currINFIdx) + ainfo2 := timestamp(raw[:length], currINFIdx) + assert ainfo1 == ainfo2 + + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + assert consDir1 == consDir2 + + peer1 := path.Peer(raw, currINFIdx) + peer2 := path.Peer(raw[:length], currINFIdx) + assert peer1 == peer2 + + segmentWidenLemma(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1, length) + ret1 := segment(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1) + ret2 := segment(raw[:length], offset, currHFIdx, asid, ainfo2, consDir2, peer2) + assert ret1 == reveal currSeg(raw, offset, currINFIdx, currHFIdx, asid) + assert ret2 == reveal currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid) + assert ret1 == ret2 + + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +} + +ghost +requires 0 <= offset +requires 0 < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer) +decreases +func segmentWidenLemma(raw []byte, offset int, currHFIdx int, asid seq[io.IO_as], ainfo io.IO_ainfo, consDir bool, peer bool, length int) { + newP := (R52 + R53)/2 + assert R53 < newP && newP < R52 + hopFieldsConsDirWidenLemma(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo, length, newP) + hopFieldsNotConsDirWidenLemma(raw, offset, len(asid)-1, set[io.IO_msgterm]{}, asid, ainfo, length, newP) + hopfields1 := consDir ? hopFieldsConsDir(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw, offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) + hopfields2 := consDir ? 
hopFieldsConsDir(raw[:length], offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw[:length], offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) + assert hopfields1 == hopfields2 + + uinfo := uInfo(hopfields1, currHFIdx, consDir) + + ret1 := io.IO_seg2(io.IO_seg3_{ + AInfo :ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields1, currHFIdx - 1), + Future : segFuture(hopfields1, currHFIdx), + History : segHistory(hopfields1, currHFIdx - 1), + }) + ret2 := io.IO_seg2(io.IO_seg3_{ + AInfo :ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields2, currHFIdx - 1), + Future : segFuture(hopfields2, currHFIdx), + History : segHistory(hopfields2, currHFIdx - 1), + }) + assert ret1 == segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) + assert ret2 == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer) + assert ret1 == ret2 +} + +ghost +requires R53 < p +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) == + hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) +decreases len(asid) - currHFIdx +func hopFieldsConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) { + if currHFIdx == len(asid) { + ret := seq[io.IO_HF]{} + assert ret == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + } else { + hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length) + hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + assert hf1 == hf2 + + newP := (p + R53)/2 + assert R53 < newP && newP < p + hopFieldsConsDirWidenLemma(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP) + ret1 := seq[io.IO_HF]{hf1} ++ hopFieldsConsDir(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo) + ret2 := seq[io.IO_HF]{hf2} ++ hopFieldsConsDir(raw[:length], offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo) + assert ret1 == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret2 == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + assert ret1 == ret2 + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires idx + path.HopLen <= length +requires 0 <= idx +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures hopField(raw, idx, beta, asid, ainfo) == hopField(raw[:length], idx, beta, asid, ainfo) +decreases +func hopFieldWidenLemma(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO_as, ainfo io.IO_ainfo, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + + assert forall i int :: { &raw[idx+2+i] } { &raw[idx+2:idx+4][i] } 0 <= i && i < 2 ==> &raw[idx+2+i] == &raw[idx+2:idx+4][i] + assert forall i int :: { &raw[idx+4+i] } { 
&raw[idx+4:idx+6][i] } 0 <= i && i < 2 ==> &raw[idx+4+i] == &raw[idx+4:idx+6][i] + inif21 := binary.BigEndian.Uint16(raw[idx+2:idx+4]) + inif22 := binary.BigEndian.Uint16(raw[:length][idx+2:idx+4]) + assert inif21 == inif22 + + egif2 := binary.BigEndian.Uint16(raw[idx+4:idx+6]) + op_inif2 := inif21 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(inif21)) + op_egif2 := egif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(egif2)) + ts := io.IO_msgterm(io.MsgTerm_Num{ainfo}) + l := io.IO_msgterm(io.MsgTerm_L{seq[io.IO_msgterm]{ts, io.if2term(op_inif2), io.if2term(op_egif2), io.IO_msgterm(io.MsgTerm_FS{beta})}}) + hvf := io.mac(io.macKey(io.asidToKey(asid)), l) + + ret1 := io.IO_HF(io.IO_HF_{ + InIF2 : op_inif2, + EgIF2 : op_egif2, + HVF : hvf, + }) + ret2 := io.IO_HF(io.IO_HF_{ + InIF2 : op_inif2, + EgIF2 : op_egif2, + HVF : hvf, + }) + assert ret1 == hopField(raw, idx, beta, asid, ainfo) + assert ret2 == hopField(raw[:length], idx, beta, asid, ainfo) + assert ret1 == ret2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +} + +ghost +requires R53 < p +requires 0 <= offset +requires -1 <= currHFIdx && currHFIdx < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * currHFIdx + path.HopLen <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) == + hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) +decreases currHFIdx + 1 +func hopFieldsNotConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) { + if currHFIdx == -1 { + ret := seq[io.IO_HF]{} + assert ret == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + } else { + hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length) + hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + assert hf1 == hf2 + + newP := (p + R53)/2 + assert R53 < newP && newP < p + hopFieldsNotConsDirWidenLemma(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP) + ret1 := hopFieldsNotConsDir(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf1} + ret2 := hopFieldsNotConsDir(raw[:length], offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf2} + assert ret1 == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret2 == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + assert ret1 == ret2 + } +} + +ghost +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len) <= length +requires 1 <= currINFIdx && currINFIdx < 4 +requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + 
+ghost
+requires 0 < seg1Len
+requires 0 <= seg2Len
+requires 0 <= seg3Len
+requires 0 <= length && length <= len(raw)
+requires pktLen(seg1Len, seg2Len, seg3Len) <= length
+requires 1 <= currINFIdx && currINFIdx < 4
+requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len
+requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len
+preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
+preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
+ensures leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) ==
+	leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+decreases
+func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) {
+	offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0)
+	if currINFIdx == 1 && seg2Len > 0 {
+		currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid, length)
+		ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid))
+		ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, 0, asid))
+		assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret1 == ret2
+	} else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 {
+		currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length)
+		ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
+		ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
+		assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret1 == ret2
+	} else {
+		ret := none[io.IO_seg3]
+		assert ret == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+	}
+}
+
+ghost
+requires 0 < seg1Len
+requires 0 <= seg2Len
+requires 0 <= seg3Len
+requires 0 <= length && length <= len(raw)
+requires pktLen(seg1Len, seg2Len, seg3Len) <= length
+requires -1 <= currINFIdx && currINFIdx < 2
+requires (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg2Len
+requires (currINFIdx == 0 && seg2Len > 0) ==> len(asid) == seg1Len
+preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
+preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
+ensures rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) ==
+	rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+decreases
+func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) {
+	offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0)
+	if currINFIdx == 1 && seg2Len > 0 && seg3Len > 0 {
+		currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid, length)
+		ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid))
+		ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid))
+		assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret1 == ret2
+	} else if currINFIdx == 0 && seg2Len > 0 {
+		currSegWidenLemma(raw, offset, currINFIdx, seg1Len, asid, length)
+		ret1 := some(currSeg(raw, offset, currINFIdx, seg1Len, asid))
+		ret2 := some(currSeg(raw[:length], offset, currINFIdx, seg1Len, asid))
+		assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret1 == ret2
+	} else {
+		ret := none[io.IO_seg3]
+		assert ret == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+	}
+}
+
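+// midSegWidenLemma proves the analogous property for midSeg: the abstraction of the
+// middle segment is the same whether it is computed over raw or over raw[:length].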
+ghost
+requires 0 <= seg2Len
+requires 0 < seg1Len
+requires 0 <= length && length <= len(raw)
+requires 0 <= seg3Len
+requires 2 <= currINFIdx && currINFIdx < 5
+requires pktLen(seg1Len, seg2Len, seg3Len) <= length
+requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len
+requires (currINFIdx == 4 && seg2Len > 0) ==> len(asid) == seg1Len
+preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51)
+preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51)
+ensures midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) ==
+	midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+decreases
+func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) {
+	offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0)
+	if currINFIdx == 4 && seg2Len > 0 {
+		currSegWidenLemma(raw, offset, 0, seg1Len, asid, length)
+		ret1 := some(currSeg(raw, offset, 0, seg1Len, asid))
+		ret2 := some(currSeg(raw[:length], offset, 0, seg1Len, asid))
+		assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret1 == ret2
+	} else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 {
+		currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length)
+		ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
+		ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid))
+		assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret1 == ret2
+	} else {
+		ret := none[io.IO_seg3]
+		assert ret == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+		assert ret == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid)
+	}
+}
diff --git a/verification/io/io-spec.gobra b/verification/io/io-spec.gobra
index df46e6ae1..ef2fcb250 100644
--- a/verification/io/io-spec.gobra
+++ b/verification/io/io-spec.gobra
@@ -430,10 +430,4 @@ requires token(t) && CBio_IN_bio3s_exit(t, v)
 ensures token(old(dp3s_iospec_bio3s_exit_T(t, v)))
 func Exit(ghost t Place, ghost v IO_val)
 
-ghost
-decreases
-requires token(t) && CBioIO_bio3s_send(t, v)
-ensures token(old(dp3s_iospec_bio3s_send_T(t, v)))
-func Send(ghost t Place, ghost v IO_val)
-
-/** End of helper functions to perfrom BIO operations **/
\ No newline at end of file
+/** End of helper functions to perform BIO operations **/