Skip to content

Commit

Permalink
Verify send guard in Run (#263)
Browse files Browse the repository at this point in the history
* remove send operation

* lemma for smaller buffer resulting in same abstract pkt

* progress send guard

* progress send guard

* Fix incompleteness and continue with send guard (#273)

* backup

* backup

* backup

* backup

* backup

* drop space

* pick better triggers

* add necessary lemma and call to it; contains an assume false that needs to be dropped

* backup

* backup

* add missing loop invariant about ingressID

* backup

* backup

* fix verification error

* try out a simpler trigger

* widen lemma for absIO_val (#268)

* widen lemma for abspkt (non terminating)

* abspkt proven

* renamed io-spec-lemmas

* io val also proven

* cleanup

* merged markus' abspkt improvements

* consdir lemma

* proved

* reinstate lemma4

* fix verification error

* Simplify widen lemma from #268 (#282)

* start simplifying

* continue simplifying

* continue simplifying stuff

* continue simplifying

* continue simplifying

* simplify further

* finish for now

* Update router/io-spec.gobra

---------

Co-authored-by: João Pereira <joaopereira.19@gmail.com>

* Continue send (#283)

* widen lemma for abspkt (non terminating)

* abspkt proven

* renamed io-spec-lemmas

* io val also proven

* cleanup

* merged markus' abspkt improvements

* consdir lemma

* proved

* reinstate lemma4

* fix verification error

* Simplify widen lemma from #268 (#282)

* start simplifying

* continue simplifying

* continue simplifying stuff

* continue simplifying

* continue simplifying

* simplify further

* finish for now

* Update router/io-spec.gobra

* finish send in Run

* propagate changes to processSCION

---------

Co-authored-by: Dspil <dennisspiliopoylos@gmail.com>

* backup

* adapt to the new syntax of backend annotations

* clean-up

* changes according to feedback

---------

Co-authored-by: João Pereira <joaopereira.19@gmail.com>
Co-authored-by: Dionysios Spiliopoulos <32896454+Dspil@users.noreply.github.com>
Co-authored-by: Dspil <dennisspiliopoylos@gmail.com>
  • Loading branch information
4 people authored Mar 25, 2024
1 parent e40741a commit 2c509cc
Show file tree
Hide file tree
Showing 5 changed files with 1,098 additions and 33 deletions.
117 changes: 96 additions & 21 deletions router/dataplane.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,9 +137,8 @@ type BatchConn interface {
// @ ensures io.token(old(MultiReadBioNext(place, prophecyM)))
// @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, ifsToIO_ifs(ingressID)))
// @ ensures err == nil ==>
// @ forall i int :: { &msgs[i] } 0 <= i && i < n ==>
// @ unfolding acc(msgs[i].Mem(), R56) in absIO_val(dp, msgs[i].Buffers[0], ingressID) ==
// @ old(MultiReadBioIO_val(place, n)[i])
// @ forall i int :: { &msgs[i] } 0 <= i && i < n ==>
// @ MsgToAbsVal(dp, &msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i])
// TODO (Markus): uint16 or option[io.IO_ifs] for ingress
ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place, ghost dp io.DataPlaneSpec @*/) (n int, err error)
// @ requires acc(addr.Mem(), _)
Expand All @@ -152,10 +151,19 @@ type BatchConn interface {
// (VerifiedSCION) opted for less reusable spec for WriteBatch for
// performance reasons.
// @ requires len(msgs) == 1
// @ preserves acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr()
// @ requires acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr()
// @ ensures acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr()
// @ ensures err == nil ==> 0 <= n && n <= len(msgs)
// @ ensures err != nil ==> err.ErrorMem()
WriteBatch(msgs underlayconn.Messages, flags int) (n int, err error)
// contracts for IO-spec
// @ requires dp.Valid()
// @ requires MsgToAbsVal(dp, &msgs[0], egressID) == ioAbsPkts
// @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts)
// @ ensures dp.Valid()
// (VerifiedSCION) the permission to the protocol must always be returned, otherwise the router could not continue
// after failing to send a packet.
// @ ensures io.token(old(io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts)))
WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val, ghost dp io.DataPlaneSpec @*/) (n int, err error)
// @ requires Mem()
// @ ensures err != nil ==> err.ErrorMem()
// @ decreases
Expand Down Expand Up @@ -732,6 +740,7 @@ func (d *DataPlane) AddNextHopBFD(ifID uint16, src, dst *net.UDPAddr, cfg contro
// @ requires dp.Valid()
// @ requires d.DpAgreesWithSpec(dp)
// @ requires io.token(place) && dp.dp3s_iospec_ordered(state, place)
// @ #backend[moreJoins()]
func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost state io.IO_dp3s_state_local, ghost dp io.DataPlaneSpec @*/) error {
// @ share d, ctx
d.mtx.Lock()
Expand Down Expand Up @@ -791,8 +800,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
// @ requires let d := *dPtr in
// @ d.DpAgreesWithSpec(dp)
// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
// @ #backend[moreJoins()]
func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) {
// @ ghost ioIngressID := ifsToIO_ifs(ingressID)
d := *dPtr
msgs := conn.NewReadMessages(inputBatchCnt)
// @ requires forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==>
Expand Down Expand Up @@ -855,23 +864,36 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
// @ invariant ingressID in d.getDomForwardingMetrics()
// @ invariant acc(rd.Mem(), _)
// @ invariant processor.sInit() && processor.sInitD() === d
// @ invariant processor.getIngressID() == ingressID
// @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>
// @ invariant d.DpAgreesWithSpec(dp) && dp.Valid()
for d.running {
// @ ghost ioIngressID := ifsToIO_ifs(ingressID)
// Multi recv event
// @ ghost ioLock.Lock()
// @ unfold SharedInv!< dp, ioSharedArg !>()
// @ ghost t, s := *ioSharedArg.Place, *ioSharedArg.State
// @ ghost numberOfReceivedPacketsProphecy := AllocProphecy()
// @ ExtractMultiReadBio(dp, t, numberOfReceivedPacketsProphecy, s)
// @ MultiUpdateElemWitness(t, numberOfReceivedPacketsProphecy, ioIngressID, s, ioSharedArg)
// @ ghost ioValSeq := MultiReadBioIO_val(t,numberOfReceivedPacketsProphecy)
// @ ghost ioValSeq := MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)

// @ ghost sN := MultiReadBioUpd(t, numberOfReceivedPacketsProphecy, s)
// @ ghost tN := MultiReadBioNext(t, numberOfReceivedPacketsProphecy)
// @ assert dp.dp3s_iospec_ordered(sN, tN)
// @ BeforeReadBatch:
pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t , dp @*/)
// @ assert old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)) == ioValSeq
// @ assert err == nil ==>
// @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
// @ ioValSeq[i] == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i])
// @ assert err == nil ==>
// @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i]
// @ ghost *ioSharedArg.State = sN
// @ ghost *ioSharedArg.Place = tN
// @ assert err == nil ==>
// @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
// @ MsgToAbsVal(dp, &msgs[i], ingressID) == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i])
// @ MultiElemWitnessConv(ioSharedArg.IBufY, ioIngressID, ioValSeq)
// @ fold SharedInv!< dp, ioSharedArg !>()
// @ ioLock.Unlock()
Expand All @@ -893,6 +915,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
// @ !msgs[i].HasWildcardPermAddr()
// @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
// @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer())
// @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
// @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i]

// (VerifiedSCION) using regular for loop instead of range loop to avoid unnecessary
// complications with permissions
Expand All @@ -915,6 +939,15 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
// @ invariant forall i int :: { &msgs[i] } 0 <= i && i < pkts ==>
// @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer())
// @ invariant processor.sInit() && processor.sInitD() === d
// @ invariant processor.getIngressID() == ingressID
// contracts for IO-spec
// @ invariant pkts <= len(ioValSeq)
// @ invariant d.DpAgreesWithSpec(dp) && dp.Valid()
// @ invariant ioIngressID == ifsToIO_ifs(ingressID)
// @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
// @ invariant forall i int :: { &msgs[i] } i0 <= i && i < pkts ==>
// @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i]
// @ invariant MultiElemWitnessWithIndex(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0)
for i0 := 0; i0 < pkts; i0++ {
// @ assert &msgs[:pkts][i0] == &msgs[i0]
// @ preserves 0 <= i0 && i0 < pkts && pkts <= len(msgs)
Expand Down Expand Up @@ -946,12 +979,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
// @ assert p.Buffers === m.Buffers
// @ assert acc(&p.Buffers[0])
// @ assert p.N <= len(p.Buffers[0])
// @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, writePerm)
// @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm)
tmpBuf := p.Buffers[0][:p.N]
// @ ghost absPktTmpBuf := absIO_val(dp, tmpBuf, ingressID)
// @ ghost absPktBuf0 := absIO_val(dp, msgs[i0].Buffers[0], ingressID)
// @ assert msgs[i0] === p
// @ absIO_valWidenLemma(dp, p.Buffers[0], ingressID, p.N)
// @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf === absPktBuf0
// @ MultiElemWitnessStep(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0)
// @ assert ioValSeq[i0].isIO_val_Pkt2 ==>
// @ ElemWitness(ioSharedArg.IBufY, ioIngressID, ioValSeq[i0].IO_val_Pkt2_2)
// @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf == ioValSeq[i0]
// @ assert ifsToIO_ifs(processor.getIngressID()) == ioIngressID
// @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm)
// @ assert sl.AbsSlice_Bytes(tmpBuf, 0, p.N)
// @ assert sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf))
result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp@*/)
result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp @*/)
// @ fold scmpErr.Mem()

switch {
case err == nil:
// @ unfold scmpErr.Mem()
Expand Down Expand Up @@ -999,6 +1044,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
continue
}

// (VerifiedSCION) we currently have this assumption because we cannot think of a sound way to capture
// the behaviour of errors.As(...) in our specifications. Nonetheless, we checked extensively that, when
// processPkt does not return an error or returns an scmpError (and thus errors.As(err, &scmpErr) succeeds),
// result.OutPkt is always non-nil. For the other kinds of errors, the result is nil, but that branch is killed
// before this point.
// @ assume result.OutPkt != nil

// Write to OutConn; drop the packet if this would block.
// Use WriteBatch because it's the only available function that
// supports MSG_DONTWAIT.
Expand All @@ -1012,8 +1064,26 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta
writeMsgs[0].Addr = result.OutAddr
}
// @ sl.NilAcc_Bytes()
// @ assert absIO_val(dp, result.OutPkt, result.EgressID) == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID)
// @ assert result.OutPkt != nil ==> newAbsPkt == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID)
// @ fold acc(writeMsgs[0].Mem(), R50)
_, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT)

// @ ghost ioLock.Lock()
// @ unfold SharedInv!< dp, ioSharedArg !>()
// @ ghost t, s := *ioSharedArg.Place, *ioSharedArg.State
// @ ghost if(newAbsPkt.isIO_val_Pkt2) {
// @ ApplyElemWitness(s.obuf, ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2)
// @ assert newAbsPkt.IO_val_Pkt2_2 in AsSet(s.obuf[newAbsPkt.IO_val_Pkt2_1])
// @ assert dp.dp3s_iospec_bio3s_send_guard(s, t, newAbsPkt)
// @ } else { assert newAbsPkt.isIO_val_Unsupported }
// @ unfold dp.dp3s_iospec_ordered(s, t)
// @ unfold dp.dp3s_iospec_bio3s_send(s, t)
// @ io.TriggerBodyIoSend(newAbsPkt)
// @ ghost tN := io.dp3s_iospec_bio3s_send_T(t, newAbsPkt)
_, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT /*@, result.EgressID, t, newAbsPkt, dp @*/)
// @ ghost *ioSharedArg.Place = tN
// @ fold SharedInv!< dp, ioSharedArg !>()
// @ ghost ioLock.Unlock()
// @ unfold acc(writeMsgs[0].Mem(), R50)
// @ ghost if addrAliasesPkt && result.OutAddr != nil {
// @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15)
Expand Down Expand Up @@ -1280,7 +1350,7 @@ type processResult struct {
}

// @ requires acc(d.Mem(), _) && d.getMacFactory() != nil
// @ ensures res.sInit() && res.sInitD() == d
// @ ensures res.sInit() && res.sInitD() == d && res.getIngressID() == ingressID
// @ decreases
func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcessor) {
var verScionTmp gopacket.SerializeBuffer
Expand Down Expand Up @@ -1309,6 +1379,7 @@ func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcess

// @ preserves p.sInit()
// @ ensures p.sInitD() == old(p.sInitD())
// @ ensures p.getIngressID() == old(p.getIngressID())
// @ ensures p.sInitRawPkt() == nil
// @ ensures p.sInitPath() == nil
// @ ensures p.sInitHopField() == path.HopField{}
Expand Down Expand Up @@ -1341,10 +1412,11 @@ func (p *scionPacketProcessor) reset() (err error) {
// @ d.WellConfigured() &&
// @ d.getValSvc() != nil &&
// @ d.getValForwardingMetrics() != nil &&
// @ d.DpAgreesWithSpec(dp)
// @ d.DpAgreesWithSpec(dp)
// @ ensures p.sInit()
// @ ensures acc(p.sInitD().Mem(), _)
// @ ensures p.sInitD() == old(p.sInitD())
// @ ensures p.getIngressID() == old(p.getIngressID())
// @ ensures p.sInitD().validResult(respr, addrAliasesPkt)
// @ ensures acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), 1 - R15)
// @ ensures addrAliasesPkt ==> (
Expand All @@ -1358,13 +1430,15 @@ func (p *scionPacketProcessor) reset() (err error) {
// @ requires dp.Valid()
// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
// @ requires let absPkt := absIO_val(dp, rawPkt, p.getIngressID()) in
// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2)
// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==>
// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2)
// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> respr.OutPkt != nil &&
// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID)
// TODO: On a first step, we will prove that whenever we have a valid scion packet in processSCION,
// the correct "next packet" is computed
// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2)
// @ ensures dp.Valid()
// @ ensures respr.OutPkt != nil ==>
// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID)
// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{})
// @ ensures newAbsPkt.isIO_val_Pkt2 ==>
// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2)
// @ ensures reserr != nil && respr.OutPkt != nil ==> newAbsPkt.isIO_val_Unsupported
// @ ensures respr.OutPkt != nil ==> newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID)
func (p *scionPacketProcessor) processPkt(rawPkt []byte,
srcAddr *net.UDPAddr /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) {

Expand Down Expand Up @@ -1650,8 +1724,10 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) {
// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2)
// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==>
// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2)
// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> respr.OutPkt != nil &&
// @ ensures respr.OutPkt != nil ==>
// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID)
// @ ensures reserr != nil && respr.OutPkt != nil ==>
// @ newAbsPkt.isIO_val_Unsupported
func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) {

var ok bool
Expand Down Expand Up @@ -2819,7 +2895,6 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool,
}
// verify the new block
if r, err := p.verifyCurrentMAC(); err != nil {
// fold acc(p.scionLayer.Mem(ub), R3)
// @ p.scionLayer.DowngradePerm(ub)
return r, serrors.WithCtx(err, "info", "after xover") /*@, false, io.IO_val_Unit{} @*/
}
Expand Down
18 changes: 16 additions & 2 deletions router/dataplane_concurrency_model.gobra
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@ pure func MultiReadBioCorrectIfs(
}

ghost
opaque
requires 0 <= expectedPkts && MultiReadBio(t, expectedPkts)
ensures len(res) == expectedPkts
decreases expectedPkts
Expand Down Expand Up @@ -159,8 +160,10 @@ requires ElemAuth(s.ibuf, y.IBufY) && ElemAuth(s.obuf, y.OBufY)
ensures MultiReadBio(t, n)
ensures MultiReadBioUpd(t, n, s) == old(MultiReadBioUpd(t, n, s))
ensures MultiReadBioNext(t, n) == old(MultiReadBioNext(t, n))
ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY) && ElemAuth(MultiReadBioUpd(t, n, s).obuf, y.OBufY)
ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==> MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n))
ensures ElemAuth(MultiReadBioUpd(t, n, s).ibuf, y.IBufY)
ensures ElemAuth(MultiReadBioUpd(t, n, s).obuf, y.OBufY)
ensures 0 <= n && MultiReadBioCorrectIfs(t, n, k) ==>
MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n))
decreases n
func MultiUpdateElemWitness(
t io.Place,
Expand All @@ -180,6 +183,7 @@ func MultiUpdateElemWitness(
}

if 0 <= n && MultiReadBioCorrectIfs(t, n, k) {
reveal MultiReadBioIO_val(t, n)
fold MultiElemWitness(y.IBufY, k, MultiReadBioIO_val(t, n))
}
}
Expand Down Expand Up @@ -215,6 +219,16 @@ func MultiElemWitnessConv(y ElemRA,k Key, es seq[io.IO_val]) {
multiElemWitnessConvAux(y,k,es,0)
}

ghost
requires 0 <= currIdx && currIdx < len(es)
requires MultiElemWitnessWithIndex(y, k, es, currIdx)
ensures es[currIdx].isIO_val_Pkt2 ==> ElemWitness(y, k, es[currIdx].IO_val_Pkt2_2)
ensures MultiElemWitnessWithIndex(y, k, es, currIdx + 1)
decreases
func MultiElemWitnessStep(y ElemRA, k Key, es seq[io.IO_val], currIdx int) {
unfold MultiElemWitnessWithIndex(y, k, es, currIdx)
}

ghost
requires i >= 0
requires MultiElemWitness(y,k,es[i:])
Expand Down
18 changes: 15 additions & 3 deletions router/io-spec.gobra
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,14 @@
package router

import (
sl "verification/utils/slices"
"verification/io"
"verification/dependencies/encoding/binary"
"github.com/scionproto/scion/pkg/slayers/path"
"github.com/scionproto/scion/pkg/slayers/path/scion"
"github.com/scionproto/scion/private/topology"
"golang.org/x/net/ipv4"

"verification/dependencies/encoding/binary"
"verification/io"
sl "verification/utils/slices"
. "verification/utils/definitions"
)

Expand Down Expand Up @@ -652,3 +654,13 @@ pure func (d *DataPlane) DpAgreesWithSpec(dp io.DataPlaneSpec) bool {
d.dpSpecWellConfiguredNeighborIAs(dp) &&
d.dpSpecWellConfiguredLinkTypes(dp)
}

ghost
requires dp.Valid()
requires acc(msg.Mem(), R50)
decreases
pure func MsgToAbsVal(dp io.DataPlaneSpec, msg *ipv4.Message, ingressID uint16) (res io.IO_val) {
return unfolding acc(msg.Mem(), R50) in
absIO_val(dp, msg.Buffers[0], ingressID)
}

Loading

0 comments on commit 2c509cc

Please sign in to comment.