diff --git a/pkg/slayers/path/hopfield_spec.gobra b/pkg/slayers/path/hopfield_spec.gobra
index 53a84309e..e93e22f39 100644
--- a/pkg/slayers/path/hopfield_spec.gobra
+++ b/pkg/slayers/path/hopfield_spec.gobra
@@ -16,32 +16,51 @@
package path

-ghost const MetaLen = 4
+import (
+ "verification/io"
+ "verification/utils/slices"
+ "verification/dependencies/encoding/binary"
+ . "verification/utils/definitions"
+)

pred (h *HopField) Mem() {
 acc(h) && h.ConsIngress >= 0 && h.ConsEgress >= 0
}
-ghost
+
+ghost
decreases
-pure func InfoFieldOffset(currINF int) int {
- return MetaLen + InfoLen * currINF
+pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs] {
+ return ifs == 0 ? none[io.IO_ifs] : some(io.IO_ifs(ifs))
}

-ghost
-requires 0 <= currINF
-requires InfoFieldOffset(currINF) < len(raw)
-requires acc(&raw[InfoFieldOffset(currINF)], _)
+ghost
+requires 0 <= start && start <= middle
+requires middle + HopLen <= end && end <= len(raw)
+requires acc(slices.AbsSlice_Bytes(raw, start, end), _)
decreases
-pure func ConsDir(raw []byte, currINF int) bool {
- return raw[InfoFieldOffset(currINF)] & 0x1 == 0x1
+pure func BytesToIO_HF(raw []byte, start int, middle int, end int) (io.IO_HF) {
+ return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle + 2 + k]) in
+ let _ := Asserting(forall k int :: {&raw[middle+4:middle+6][k]} 0 <= k && k < 2 ==> &raw[middle+4:middle+6][k] == &raw[middle + 4 + k]) in
+ let _ := Asserting(forall k int :: {&raw[middle+6:middle+6+MacLen][k]} 0 <= k && k < MacLen ==> &raw[middle+6:middle+6+MacLen][k] == &raw[middle + 6 + k]) in
+ unfolding acc(slices.AbsSlice_Bytes(raw, start, end), _) in
+ let inif2 := binary.BigEndian.Uint16(raw[middle+2:middle+4]) in
+ let egif2 := binary.BigEndian.Uint16(raw[middle+4:middle+6]) in
+ let op_inif2 := ifsToIO_ifs(inif2) in
+ let op_egif2 := ifsToIO_ifs(egif2) in
+ io.IO_HF(io.IO_HF_{
+ InIF2 : op_inif2,
+ EgIF2 : op_egif2,
+ HVF : AbsMac(FromSliceToMacArray(raw[middle+6:middle+6+MacLen])),
+ })
}

-ghost
-requires 0 <= currINF
-requires InfoFieldOffset(currINF) < len(raw)
-requires acc(&raw[InfoFieldOffset(currINF)], _)
+ghost
decreases
-pure func Peer(raw []byte, currINF int) bool {
- return raw[InfoFieldOffset(currINF)] & 0x2 == 0x2
+pure func (h HopField) ToIO_HF() (io.IO_HF) {
+ return io.IO_HF(io.IO_HF_{
+ InIF2 : ifsToIO_ifs(h.ConsIngress),
+ EgIF2 : ifsToIO_ifs(h.ConsEgress),
+ HVF : AbsMac(h.Mac),
+ })
}
diff --git a/pkg/slayers/path/infofield.go b/pkg/slayers/path/infofield.go
index 0e1a9442c..b30edb937 100644
--- a/pkg/slayers/path/infofield.go
+++ b/pkg/slayers/path/infofield.go
@@ -22,8 +22,10 @@ import (
 "github.com/scionproto/scion/pkg/private/serrors"
 "github.com/scionproto/scion/pkg/private/util"
+ //@ bits "github.com/scionproto/scion/verification/utils/bitwise"
 //@ . "github.com/scionproto/scion/verification/utils/definitions"
 //@ "github.com/scionproto/scion/verification/utils/slices"
+ //@ "verification/io"
)

// InfoLen is the size of an InfoField in bytes.
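Reviewer note (not part of the patch): BytesToIO_HF above pins down the hop-field wire layout: two big-endian interface identifiers at byte offsets 2 and 4 from the start of the hop field, followed by the MacLen-byte MAC, with a zero interface ID abstracted to none by ifsToIO_ifs. A minimal Go sketch of the same extraction, assuming MacLen == 6 as in pkg/slayers/path and a buffer long enough to hold the hop field (parseHopField is illustrative and does not exist in the codebase):

package sketch

import "encoding/binary"

// parseHopField mirrors the offsets assumed by BytesToIO_HF:
// raw[middle+2:middle+4] holds ConsIngress, raw[middle+4:middle+6]
// holds ConsEgress, and the following 6 bytes hold the hop MAC.
func parseHopField(raw []byte, middle int) (inIF, egIF uint16, mac [6]byte) {
	inIF = binary.BigEndian.Uint16(raw[middle+2 : middle+4])
	egIF = binary.BigEndian.Uint16(raw[middle+4 : middle+6])
	copy(mac[:], raw[middle+6:middle+12])
	return inIF, egIF, mac
}

A zero inIF or egIF then corresponds to none[io.IO_ifs] at the IO-spec level, exactly as in ifsToIO_ifs.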
@@ -85,26 +87,43 @@ func (inf *InfoField) DecodeFromBytes(raw []byte) (err error) {
// @ preserves acc(inf, R10)
// @ preserves slices.AbsSlice_Bytes(b, 0, InfoLen)
// @ ensures err == nil
+// @ ensures inf.ToIntermediateAbsInfoField() ==
+// @ BytesToIntermediateAbsInfoField(b, 0, 0, InfoLen)
// @ decreases
func (inf *InfoField) SerializeTo(b []byte) (err error) {
 if len(b) < InfoLen {
 return serrors.New("buffer for InfoField too short", "expected", InfoLen,
 "actual", len(b))
 }
+ //@ ghost targetAbsInfo := inf.ToIntermediateAbsInfoField()
 //@ unfold slices.AbsSlice_Bytes(b, 0, InfoLen)
 b[0] = 0
 if inf.ConsDir {
 b[0] |= 0x1
 }
+ //@ ghost tmpInfo1 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen)
+ //@ bits.InfoFieldFirstByteSerializationLemmas()
+ //@ assert tmpInfo1.ConsDir == targetAbsInfo.ConsDir
+ //@ ghost firstByte := b[0]
 if inf.Peer {
 b[0] |= 0x2
 }
+ //@ tmpInfo2 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen)
+ //@ assert tmpInfo2.Peer == (b[0] & 0x2 == 0x2)
+ //@ assert tmpInfo2.ConsDir == (b[0] & 0x1 == 0x1)
+ //@ assert tmpInfo2.Peer == targetAbsInfo.Peer
+ //@ assert tmpInfo2.ConsDir == tmpInfo1.ConsDir
+ //@ assert tmpInfo2.ConsDir == targetAbsInfo.ConsDir
 b[1] = 0 // reserved
 //@ assert &b[2:4][0] == &b[2] && &b[2:4][1] == &b[3]
 binary.BigEndian.PutUint16(b[2:4], inf.SegID)
+ //@ ghost tmpInfo3 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen)
+ //@ assert tmpInfo3.UInfo == targetAbsInfo.UInfo
 //@ assert &b[4:8][0] == &b[4] && &b[4:8][1] == &b[5]
 //@ assert &b[4:8][2] == &b[6] && &b[4:8][3] == &b[7]
 binary.BigEndian.PutUint32(b[4:8], inf.Timestamp)
+ //@ ghost tmpInfo4 := BytesToIntermediateAbsInfoFieldHelper(b, 0, InfoLen)
+ //@ assert tmpInfo4.AInfo == targetAbsInfo.AInfo
 //@ fold slices.AbsSlice_Bytes(b, 0, InfoLen)
 return nil
}
@@ -112,11 +131,15 @@ func (inf *InfoField) SerializeTo(b []byte) (err error) {
// UpdateSegID updates the SegID field by XORing it with the first two
// bytes of the MAC. It is the beta calculation according to
// https://docs.scion.org/en/latest/protocols/scion-header.html#hop-field-mac-computation
+// @ requires hf.HVF == AbsMac(hfMac)
// @ preserves acc(&inf.SegID)
+// @ ensures AbsUInfoFromUint16(inf.SegID) ==
+// @ old(io.upd_uinfo(AbsUInfoFromUint16(inf.SegID), hf))
// @ decreases
-func (inf *InfoField) UpdateSegID(hfMac [MacLen]byte) {
+func (inf *InfoField) UpdateSegID(hfMac [MacLen]byte /*@, ghost hf io.IO_HF @*/) {
 //@ share hfMac
 inf.SegID = inf.SegID ^ binary.BigEndian.Uint16(hfMac[:2])
+ // @ AssumeForIO(AbsUInfoFromUint16(inf.SegID) == old(io.upd_uinfo(AbsUInfoFromUint16(inf.SegID), hf)))
}

// @ decreases
diff --git a/pkg/slayers/path/infofield_spec.gobra b/pkg/slayers/path/infofield_spec.gobra
new file mode 100644
index 000000000..b0da954d4
--- /dev/null
+++ b/pkg/slayers/path/infofield_spec.gobra
@@ -0,0 +1,127 @@
+// Copyright 2022 ETH Zurich
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package path
+
+import (
+ "verification/io"
+ sl "verification/utils/slices"
+ "verification/dependencies/encoding/binary"
+ . "verification/utils/definitions"
+)
+
+ghost const MetaLen = 4
+
+ghost
+decreases
+pure func InfoFieldOffset(currINF, headerOffset int) int {
+ return headerOffset + MetaLen + InfoLen * currINF
+}
+
+ghost
+requires 0 <= currINF && 0 <= headerOffset
+requires InfoFieldOffset(currINF, headerOffset) < len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func ConsDir(raw []byte, currINF int, headerOffset int) bool {
+ return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in
+ raw[InfoFieldOffset(currINF, headerOffset)] & 0x1 == 0x1
+}
+
+ghost
+requires 0 <= currINF && 0 <= headerOffset
+requires InfoFieldOffset(currINF, headerOffset) < len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func Peer(raw []byte, currINF int, headerOffset int) bool {
+ return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in
+ raw[InfoFieldOffset(currINF, headerOffset)] & 0x2 == 0x2
+}
+
+ghost
+requires 0 <= currINF && 0 <= headerOffset
+requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func Timestamp(raw []byte, currINF int, headerOffset int) io.IO_ainfo {
+ return let idx := InfoFieldOffset(currINF, headerOffset) + 4 in
+ unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in
+ let _ := Asserting(forall i int :: { &raw[idx+i] } { &raw[idx:idx+4][i] } 0 <= i && i < 4 ==>
+ &raw[idx+i] == &raw[idx:idx+4][i]) in
+ io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4]))
+}
+
+ghost
+requires 0 <= currINF && 0 <= headerOffset
+requires InfoFieldOffset(currINF, headerOffset) + InfoLen < len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func AbsUinfo(raw []byte, currINF int, headerOffset int) set[io.IO_msgterm] {
+ return let idx := InfoFieldOffset(currINF, headerOffset) + 2 in
+ unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in
+ let _ := Asserting(forall k int :: {&raw[idx:idx+2][k]} 0 <= k && k < 2 ==>
+ &raw[idx:idx+2][k] == &raw[idx + k]) in
+ AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[idx:idx+2]))
+}
+
+// This type simplifies the InfoField, making it easier
+// to use than the IO_seg3 from the IO-spec.
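+// It keeps only the per-segment metadata (AInfo, UInfo, ConsDir, Peer)
+// and drops the Past, Future, and History hop sequences that a full
+// IO_seg3 additionally carries (cf. AbsSetInfoField in raw_spec.gobra).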
+type IntermediateAbsInfoField adt {
+ IntermediateAbsInfoField_ {
+ AInfo io.IO_ainfo
+ UInfo set[io.IO_msgterm]
+ ConsDir bool
+ Peer bool
+ }
+}
+
+ghost
+requires 0 <= start && start <= middle
+requires middle+InfoLen <= end && end <= len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, start, end), _)
+decreases
+pure func BytesToIntermediateAbsInfoField(raw []byte, start int, middle int, end int) (IntermediateAbsInfoField) {
+ return unfolding acc(sl.AbsSlice_Bytes(raw, start, end), _) in
+ BytesToIntermediateAbsInfoFieldHelper(raw, middle, end)
+}
+
+ghost
+requires 0 <= middle
+requires middle+InfoLen <= end && end <= len(raw)
+requires forall i int :: { &raw[i] } middle <= i && i < end ==>
+ acc(&raw[i], _)
+decreases
+pure func BytesToIntermediateAbsInfoFieldHelper(raw []byte, middle int, end int) (IntermediateAbsInfoField) {
+ return let _ := Asserting(forall k int :: {&raw[middle+2:middle+4][k]} 0 <= k && k < 2 ==> &raw[middle+2:middle+4][k] == &raw[middle+2 + k]) in
+ let _ := Asserting(forall k int :: {&raw[middle+4:middle+8][k]} 0 <= k && k < 4 ==> &raw[middle+4:middle+8][k] == &raw[middle+4 + k]) in
+ IntermediateAbsInfoField(IntermediateAbsInfoField_{
+ AInfo : io.IO_ainfo(binary.BigEndian.Uint32(raw[middle+4:middle+8])),
+ UInfo : AbsUInfoFromUint16(binary.BigEndian.Uint16(raw[middle+2:middle+4])),
+ ConsDir : raw[middle] & 0x1 == 0x1,
+ Peer : raw[middle] & 0x2 == 0x2,
+ })
+}
+
+ghost
+decreases
+pure func (inf InfoField) ToIntermediateAbsInfoField() (IntermediateAbsInfoField) {
+ return IntermediateAbsInfoField(IntermediateAbsInfoField_{
+ AInfo : io.IO_ainfo(inf.Timestamp),
+ UInfo : AbsUInfoFromUint16(inf.SegID),
+ ConsDir : inf.ConsDir,
+ Peer : inf.Peer,
+ })
+}
\ No newline at end of file
diff --git a/pkg/slayers/path/io_msgterm_spec.gobra b/pkg/slayers/path/io_msgterm_spec.gobra
new file mode 100644
index 000000000..41e39093d
--- /dev/null
+++ b/pkg/slayers/path/io_msgterm_spec.gobra
@@ -0,0 +1,46 @@
+// Copyright 2020 Anapaya Systems
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package path
+
+import "verification/io"
+
+// At the moment, we assume that all cryptographic operations performed at the code level
+// imply the desired properties at the IO spec level because we cannot currently prove in
+// Gobra the correctness of these operations. Given that we do not prove any properties
+// about these functions, we currently do not provide definitions for them.
+
+ghost
+decreases
+pure func AbsUInfoFromUint16(SegID uint16) set[io.IO_msgterm]
+
+ghost
+decreases
+pure func AbsMac(mac [MacLen]byte) (io.IO_msgterm)
+
+// The following function converts a slice with at least `MacLen` elements into
+// an (exclusive) array containing the MAC. Note that there are no permissions
+// involved for accessing exclusive arrays. This function is abstract for now
+// because Gobra does not allow for array literals in pure functions, even though
+// they are no more side-effectful than creating an instance of a struct type.
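+// (Illustration only: were such literals allowed, the body would be roughly
+// `return [MacLen]byte{mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]}`.)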
+// This will soon be fixed in Gobra. +ghost +requires MacLen <= len(mac) +requires forall i int :: { &mac[i] } 0 <= i && i < MacLen ==> acc(&mac[i], _) +ensures len(res) == MacLen +ensures forall i int :: { res[i] } 0 <= i && i < MacLen ==> mac[i] == res[i] +decreases +pure func FromSliceToMacArray(mac []byte) (res [MacLen]byte) diff --git a/pkg/slayers/path/scion/base.go b/pkg/slayers/path/scion/base.go index a41185eab..d74fe0c09 100644 --- a/pkg/slayers/path/scion/base.go +++ b/pkg/slayers/path/scion/base.go @@ -259,18 +259,7 @@ type MetaHdr struct { // @ preserves acc(m) // @ preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) // @ ensures (len(raw) >= MetaLen) == (e == nil) -// @ ensures e == nil ==> ( -// @ MetaLen <= len(raw) && -// @ 0 <= m.CurrINF && m.CurrINF <= 3 && -// @ 0 <= m.CurrHF && m.CurrHF < 64 && -// @ m.SegsInBounds() && -// @ let lenR := len(raw) in -// @ let b0 := sl.GetByte(raw, 0, lenR, 0) in -// @ let b1 := sl.GetByte(raw, 0, lenR, 1) in -// @ let b2 := sl.GetByte(raw, 0, lenR, 2) in -// @ let b3 := sl.GetByte(raw, 0, lenR, 3) in -// @ let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in -// @ DecodedFrom(line) == *m) +// @ ensures e == nil ==> m.DecodeFromBytesSpec(raw) // @ ensures e != nil ==> e.ErrorMem() // @ decreases func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { @@ -300,16 +289,11 @@ func (m *MetaHdr) DecodeFromBytes(raw []byte) (e error) { // @ preserves acc(m, R50) // @ preserves sl.AbsSlice_Bytes(b, 0, len(b)) // @ ensures e == nil -// @ ensures let lenR := len(b) in -// @ let b0 := sl.GetByte(b, 0, lenR, 0) in -// @ let b1 := sl.GetByte(b, 0, lenR, 1) in -// @ let b2 := sl.GetByte(b, 0, lenR, 2) in -// @ let b3 := sl.GetByte(b, 0, lenR, 3) in -// @ let v := m.SerializedToLine() in -// @ binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, v) +// @ ensures m.SerializeToSpec(b) // @ decreases func (m *MetaHdr) SerializeTo(b []byte) (e error) { if len(b) < MetaLen { + // @ Unreachable() return serrors.New("buffer for MetaHdr too short", "expected", MetaLen, "actual", len(b)) } line := uint32(m.CurrINF)<<30 | uint32(m.CurrHF&0x3F)<<24 diff --git a/pkg/slayers/path/scion/base_spec.gobra b/pkg/slayers/path/scion/base_spec.gobra index b12aec326..48faeebcf 100644 --- a/pkg/slayers/path/scion/base_spec.gobra +++ b/pkg/slayers/path/scion/base_spec.gobra @@ -20,6 +20,8 @@ import ( "encoding/binary" "github.com/scionproto/scion/pkg/slayers/path" sl "github.com/scionproto/scion/verification/utils/slices" + + . 
"github.com/scionproto/scion/verification/utils/definitions" ) pred (b *Base) NonInitMem() { @@ -73,9 +75,8 @@ pure func (b Base) ValidCurrIdxsSpec() bool { (forall i int :: { b.PathMeta.SegLen[i] } 0 <= i && i < b.NumINF ==> b.PathMeta.SegLen[i] != 0) && (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==> - b.PathMeta.SegLen[i] == 0) - // Surprisingly, the following does not seem to be needed - // b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) + b.PathMeta.SegLen[i] == 0) && + b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF) } ghost @@ -142,6 +143,13 @@ pure func (s *Base) GetMetaHdr() MetaHdr { return unfolding acc(s.Mem(), _) in s.PathMeta } +ghost +requires acc(s.Mem(), _) +decreases +pure func (s *Base) GetBase() Base { + return unfolding acc(s.Mem(), _) in *s +} + ghost requires acc(s.Mem(), _) decreases @@ -180,6 +188,145 @@ pure func (s Base) IncPathSpec() (res Base) { } } +/*************** AbsBase ***************/ + +// There is a good deal of repition in this section of the file and the similarly +// named functions for the type `Base`. While for now this is not too big of a problem, +// we should find ways to streamline the definitions, ideally by having these defs only +// for the abstraction type only. + +type AbsBase adt { + AbsBase_ { + PathMeta AbsMetaHdr + NumINF int + NumHops int + } +} + +type AbsMetaHdr adt { + AbsMetaHdr_ { + // we should change the types of CurrINF and CurrHF to wider types, + // otherwise we might start getting overflow errors here when they + // are fully enabled. + CurrINF uint8 + CurrHF uint8 + SegLen seq[uint8] + } +} + +ghost +decreases +pure func (b Base) Abs() AbsBase { + return AbsBase_{ + PathMeta: b.PathMeta.Abs(), + NumINF: b.NumINF, + NumHops: b.NumHops, + } +} + +ghost +decreases +pure func (b MetaHdr) Abs() AbsMetaHdr { + return AbsMetaHdr_{ + CurrINF: b.CurrINF, + CurrHF: b.CurrHF, + SegLen: seq[uint8]{ b.SegLen[0], b.SegLen[1], b.SegLen[2] }, + } +} + +ghost +requires len(b.PathMeta.SegLen) == 3 +decreases +pure func (b AbsBase) ReverseSpec() AbsBase { + return AbsBase_ { + PathMeta: b.ReverseMetaHdrSpec(), + NumINF: b.NumINF, + NumHops: b.NumHops, + } +} + +ghost +requires len(b.PathMeta.SegLen) == 3 +decreases +pure func (b AbsBase) ReverseMetaHdrSpec() AbsMetaHdr { + return AbsMetaHdr_ { + CurrINF: uint8(b.NumINF) - b.PathMeta.CurrINF - 1, + CurrHF: uint8(b.NumHops) - b.PathMeta.CurrHF - 1, + SegLen: b.ReverseSegLen(), + } +} + +ghost +requires len(b.PathMeta.SegLen) == 3 +decreases +pure func (b AbsBase) ReverseSegLen() seq[uint8] { + return (match b.NumINF { + case 2: seq[uint8]{ b.PathMeta.SegLen[1], b.PathMeta.SegLen[0], b.PathMeta.SegLen[2]} + case 3: seq[uint8]{ b.PathMeta.SegLen[2], b.PathMeta.SegLen[1], b.PathMeta.SegLen[0] } + default: b.PathMeta.SegLen + }) +} + +ghost +decreases +pure func (b AbsBase) ValidCurrIdxsSpec() bool { + return 0 <= b.NumINF && b.NumINF <= MaxINFs && + len(b.PathMeta.SegLen) == 3 && + 0 <= b.NumHops && b.NumHops <= MaxHops && + b.ValidCurrHfSpec() && + b.ValidCurrInfSpec() && + 0 <= b.PathMeta.SegLen[0] && b.PathMeta.SegLen[0] < MaxHops && + 0 <= b.PathMeta.SegLen[1] && b.PathMeta.SegLen[1] < MaxHops && + 0 <= b.PathMeta.SegLen[2] && b.PathMeta.SegLen[2] < MaxHops && + (b.NumINF == 1 ==> b.NumHops == int(b.PathMeta.SegLen[0])) && + (b.NumINF == 2 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1])) && + (b.NumINF == 3 ==> b.NumHops == int(b.PathMeta.SegLen[0] + b.PathMeta.SegLen[1] + b.PathMeta.SegLen[2])) && + (forall i int :: { 
+ b.PathMeta.SegLen[i] != 0) &&
+ (forall i int :: { b.PathMeta.SegLen[i] } b.NumINF <= i && i < MaxINFs ==>
+ b.PathMeta.SegLen[i] == 0) &&
+ b.PathMeta.CurrINF == b.InfForHfSpec(b.PathMeta.CurrHF)
+}
+
+ghost
+decreases
+pure func (b AbsBase) ValidCurrInfSpec() bool {
+ return 0 <= b.PathMeta.CurrINF && b.PathMeta.CurrINF < b.NumINF
+}
+
+ghost
+decreases
+pure func (b AbsBase) ValidCurrHfSpec() bool {
+ return 0 <= b.PathMeta.CurrHF && b.PathMeta.CurrHF < b.NumHops
+}
+
+ghost
+requires len(s.PathMeta.SegLen) == 3
+ensures 0 <= r && r < 3
+decreases
+pure func (s AbsBase) InfForHfSpec(hf uint8) (r uint8) {
+ return hf < s.PathMeta.SegLen[0] ?
+ 0 :
+ (hf < s.PathMeta.SegLen[0] + s.PathMeta.SegLen[1] ? 1 : 2)
+}
+
+ghost
+requires b.ValidCurrIdxsSpec()
+ensures b.ReverseSpec().ValidCurrIdxsSpec()
+decreases
+pure func (b AbsBase) ReversingValidBaseIsValidBase() Lemma {
+ return Lemma{}
+}
+
+ghost
+ensures b.ValidCurrIdxsSpec() == b.Abs().ValidCurrIdxsSpec()
+decreases
+pure func (b Base) ValidBaseHasValidAbs() Lemma {
+ return Lemma{}
+}
+
+/*************** End of AbsBase ***************/
+
ghost
requires b.Mem()
ensures b.NonInitMem()
@@ -199,6 +346,23 @@ pure func DecodedFrom(line uint32) MetaHdr {
 }
}

+ghost
+requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _)
+decreases
+pure func (m MetaHdr) DecodeFromBytesSpec(b []byte) bool {
+ return MetaLen <= len(b) &&
+ 0 <= m.CurrINF && m.CurrINF <= 3 &&
+ 0 <= m.CurrHF && m.CurrHF < 64 &&
+ m.SegsInBounds() &&
+ let lenR := len(b) in
+ let b0 := sl.GetByte(b, 0, lenR, 0) in
+ let b1 := sl.GetByte(b, 0, lenR, 1) in
+ let b2 := sl.GetByte(b, 0, lenR, 2) in
+ let b3 := sl.GetByte(b, 0, lenR, 3) in
+ let line := binary.BigEndian.Uint32Spec(b0, b1, b2, b3) in
+ DecodedFrom(line) == m
+}
+
ghost
decreases
pure func (m MetaHdr) SegsInBounds() bool {
@@ -217,6 +381,20 @@ pure func (m MetaHdr) SerializedToLine() uint32 {
 uint32(m.SegLen[2] & 0x3F)
}

+ghost
+requires acc(sl.AbsSlice_Bytes(b, 0, len(b)), _)
+decreases
+pure func (m MetaHdr) SerializeToSpec(b []byte) bool {
+ return MetaLen <= len(b) &&
+ let lenR := len(b) in
+ let b0 := sl.GetByte(b, 0, lenR, 0) in
+ let b1 := sl.GetByte(b, 0, lenR, 1) in
+ let b2 := sl.GetByte(b, 0, lenR, 2) in
+ let b3 := sl.GetByte(b, 0, lenR, 3) in
+ let v := m.SerializedToLine() in
+ binary.BigEndian.PutUint32Spec(b0, b1, b2, b3, v)
+}
+
ghost
decreases
pure func (m MetaHdr) InBounds() bool {
@@ -227,6 +405,42 @@ pure func (m MetaHdr) InBounds() bool {
 0 <= m.SegLen[2] && m.SegLen[2] <= 63
}

+ghost
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
+decreases
+pure func (s Base) EqAbsHeader(ub []byte) bool {
+ // we compute the subslice ub[:MetaLen] inside this function instead
+ // of expecting the correct subslice to be passed, otherwise this function
+ // becomes too cumbersome to use in calls from (*Raw).EqAbsHeader due to the
+ // lack of a folding expression. Same goes for MetaHdr.EqAbsHeader.
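+ // For example, a caller holding the full buffer can write s.EqAbsHeader(ub)
+ // directly, rather than first carving out s.EqAbsHeader(ub[:MetaLen]).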
+ return MetaLen <= len(ub) && + s == RawBytesToBase(ub) +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +decreases +pure func (s MetaHdr) EqAbsHeader(ub []byte) bool { + return MetaLen <= len(ub) && + unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in + s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])) +} + +ghost +opaque +requires MetaLen <= idx && idx <= len(ub) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.AbsSlice_Bytes(ub[:idx], 0, idx), R55) +ensures s.EqAbsHeader(ub) == s.EqAbsHeader(ub[:idx]) +decreases +pure func (s MetaHdr) EqAbsHeaderForSublice(ub []byte, idx int) Lemma { + return let _ := Asserting(ub[:MetaLen] === ub[:idx][:MetaLen]) in + unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) in + unfolding acc(sl.AbsSlice_Bytes(ub[:idx], 0, idx), R56) in + let _ := Asserting(s.EqAbsHeader(ub) == (s == DecodedFrom(binary.BigEndian.Uint32(ub[:MetaLen])))) in + Lemma{} +} + /** Lemma proven in /VerifiedSCION/verification/utils/bitwise/proofs.dfy **/ ghost requires m.InBounds() diff --git a/pkg/slayers/path/scion/decoded.go b/pkg/slayers/path/scion/decoded.go index 195125e6e..764a63f4d 100644 --- a/pkg/slayers/path/scion/decoded.go +++ b/pkg/slayers/path/scion/decoded.go @@ -218,7 +218,20 @@ func (s *Decoded) SerializeTo(b []byte /*@, ghost ubuf []byte @*/) (r error) { // @ decreases func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { //@ ghost isValid := s.ValidCurrIdxs(ubuf) - //@ ghost base := s.GetBase(ubuf) + /*@ + ghost base := s.GetBase(ubuf) + ghost absBase := base.Abs() + ghost absMetaHdrAferReversingSegLen := AbsMetaHdr_ { + CurrINF: absBase.PathMeta.CurrINF, + CurrHF: absBase.PathMeta.CurrHF, + SegLen: absBase.ReverseSegLen(), + } + ghost absBaseAfterReversingSegLen := AbsBase_ { + PathMeta: absMetaHdrAferReversingSegLen, + NumINF: absBase.NumINF, + NumHops: absBase.NumHops, + } + @*/ //@ unfold s.Mem(ubuf) //@ unfold s.Base.Mem() if s.NumINF == 0 { @@ -234,19 +247,12 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { s.PathMeta.SegLen[0], s.PathMeta.SegLen[lastIdx] = s.PathMeta.SegLen[lastIdx], s.PathMeta.SegLen[0] } //@ fold s.Base.Mem() - //@ fold s.Mem(ubuf) - - //@ preserves s.Mem(ubuf) - //@ preserves isValid ==> s.ValidCurrIdxs(ubuf) - //@ decreases - //@ outline( - //@ unfold s.Mem(ubuf) //@ invariant acc(s.Base.Mem(), R10) //@ invariant 0 <= i && i <= s.Base.GetNumINF() //@ invariant acc(&s.InfoFields, R10) //@ invariant len(s.InfoFields) == s.Base.GetNumINF() - //@ invariant forall i int :: { &s.InfoFields[i] } 0 <= i && i < len(s.InfoFields) ==> (acc(&s.InfoFields[i].ConsDir)) - //@ invariant isValid ==> s.Base.ValidCurrIdxs() + //@ invariant forall i int :: { &s.InfoFields[i] } 0 <= i && i < len(s.InfoFields) ==> + //@ (acc(&s.InfoFields[i].ConsDir)) //@ decreases MaxINFs-i // Reverse cons dir flags for i := 0; i < ( /*@ unfolding acc(s.Base.Mem(), R11) in @*/ s.NumINF); i++ { @@ -254,13 +260,12 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { info.ConsDir = !info.ConsDir } //@ fold s.Mem(ubuf) - //@ ) // Reverse order of hop fields //@ invariant s.Mem(ubuf) //@ invariant 0 <= i && i <= s.GetNumHops(ubuf) //@ invariant -1 <= j && j < s.GetNumHops(ubuf) - //@ invariant isValid ==> s.ValidCurrIdxs(ubuf) + //@ invariant s.GetBase(ubuf).Abs() == absBaseAfterReversingSegLen //@ decreases j-i for i, j := 0, ( /*@ unfolding s.Mem(ubuf) in (unfolding s.Base.Mem() in @*/ s.NumHops - 1 /*@ ) @*/); i < j; i, j = i+1, j-1 { 
//@ unfold s.Mem(ubuf) @@ -275,17 +280,15 @@ func (s *Decoded) Reverse( /*@ ghost ubuf []byte @*/ ) (p path.Path, r error) { //@ fold s.Mem(ubuf) } // Update CurrINF and CurrHF and SegLens - //@ preserves s.Mem(ubuf) - //@ preserves isValid ==> s.ValidCurrIdxs(ubuf) - //@ decreases - //@ outline( //@ unfold s.Mem(ubuf) //@ unfold s.Base.Mem() s.PathMeta.CurrINF = uint8(s.NumINF) - s.PathMeta.CurrINF - 1 s.PathMeta.CurrHF = uint8(s.NumHops) - s.PathMeta.CurrHF - 1 + //@ assert s.Base.Abs() == absBase.ReverseSpec() + //@ ghost if isValid { absBase.ReversingValidBaseIsValidBase() } + //@ assert isValid ==> s.Base.Abs().ValidCurrIdxsSpec() //@ fold s.Base.Mem() //@ fold s.Mem(ubuf) - //@ ) return s, nil } diff --git a/pkg/slayers/path/scion/raw.go b/pkg/slayers/path/scion/raw.go index 825115cd7..8f3d24e5e 100644 --- a/pkg/slayers/path/scion/raw.go +++ b/pkg/slayers/path/scion/raw.go @@ -22,6 +22,7 @@ import ( "github.com/scionproto/scion/pkg/slayers/path" //@ . "github.com/scionproto/scion/verification/utils/definitions" //@ sl "github.com/scionproto/scion/verification/utils/slices" + //@ io "verification/io" ) // Raw is a raw representation of the SCION (data-plane) path type. It is designed to parse as @@ -217,10 +218,13 @@ func (s *Raw) ToDecoded( /*@ ghost ubuf []byte @*/ ) (d *Decoded, err error) { // IncPath increments the path and writes it to the buffer. // @ requires s.Mem(ubuf) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires s.EqAbsHeader(ubuf) +// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) // @ ensures old(unfolding s.Mem(ubuf) in unfolding // @ s.Base.Mem() in (s.NumINF <= 0 || int(s.PathMeta.CurrHF) >= s.NumHops-1)) ==> r != nil // @ ensures r == nil ==> s.Mem(ubuf) +// @ ensures r == nil && s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) // @ ensures r != nil ==> s.NonInitMem() // @ ensures r != nil ==> r.ErrorMem() // @ decreases @@ -230,12 +234,38 @@ func (s *Raw) IncPath( /*@ ghost ubuf []byte @*/ ) (r error) { //@ fold s.NonInitMem() return err } - //@ fold s.Mem(ubuf) - //@ s.RawIdxPerm(ubuf, MetaLen, writePerm) - //@ unfold acc(s.Base.Mem(), 1/2) + //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, HalfPerm) + //@ sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, HalfPerm) + //@ sl.Reslice_Bytes(ubuf, 0, MetaLen, HalfPerm) + //@ sl.Reslice_Bytes(ubuf, 0, MetaLen, HalfPerm) + + //@ unfold acc(s.Base.Mem(), R2) err := s.PathMeta.SerializeTo(s.Raw[:MetaLen]) - //@ fold acc(s.Base.Mem(), 1/2) - //@ s.UndoRawIdxPerm(ubuf, MetaLen, writePerm) + //@ ghost if s.PathMeta.InBounds() { + //@ v := s.Raw[:MetaLen] + //@ b0 := sl.GetByte(v, 0, MetaLen, 0) + //@ b1 := sl.GetByte(v, 0, MetaLen, 1) + //@ b2 := sl.GetByte(v, 0, MetaLen, 2) + //@ b3 := sl.GetByte(v, 0, MetaLen, 3) + //@ s.PathMeta.SerializeAndDeserializeLemma(b0, b1, b2, b3) + //@ } + //@ assert s.PathMeta.InBounds() ==> s.PathMeta.EqAbsHeader(s.Raw[:MetaLen]) + //@ fold acc(s.Base.Mem(), R3) + + //@ sl.Unslice_Bytes(ubuf, 0, MetaLen, R2) + //@ sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, R2) + //@ fold acc(s.Mem(ubuf), R2) + //@ assert s.InBounds(ubuf) == s.PathMeta.InBounds() + //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(ubuf) + //@ s.PathMeta.EqAbsHeaderForSublice(ubuf, MetaLen) + //@ assert s.EqAbsHeader(ubuf) == s.PathMeta.EqAbsHeader(s.Raw[:MetaLen]) + //@ assert s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) + + //@ sl.Unslice_Bytes(ubuf, 0, MetaLen, 1-R2) + //@ sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), MetaLen, 1-R2) + //@ 
fold acc(s.Base.Mem(), R3) + //@ fold acc(s.Mem(ubuf), 1-R2) + //@ assert s.InBounds(ubuf) ==> s.EqAbsHeader(ubuf) return err } @@ -272,7 +302,7 @@ func (s *Raw) GetInfoField(idx int /*@, ghost ubuf []byte @*/) (ifield path.Info // GetCurrentInfoField is a convenience method that returns the current hop field pointed to by the // CurrINF index in the path meta header. // @ preserves acc(s.Mem(ubuf), R8) -// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1) +// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R2) // @ ensures (r == nil) == (s.GetCurrINF(ubuf) < s.GetNumINF(ubuf)) // @ ensures r != nil ==> r.ErrorMem() // @ decreases @@ -289,30 +319,63 @@ func (s *Raw) GetCurrentInfoField( /*@ ghost ubuf []byte @*/ ) (res path.InfoFie } // SetInfoField updates the InfoField at a given index. -// @ requires 0 <= idx -// @ preserves acc(s.Mem(ubuf), R20) -// @ preserves sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) -// @ ensures r != nil ==> r.ErrorMem() +// @ requires 0 <= idx +// @ requires sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ requires acc(s.Mem(ubuf), R20) +// pres for IO: +// @ requires dp.Valid() && validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) +// @ ensures acc(s.Mem(ubuf), R20) +// @ ensures sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)) +// @ ensures r != nil ==> r.ErrorMem() +// posts for IO: +// @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> +// @ validPktMetaHdr(ubuf) && s.EqAbsHeader(ubuf) +// @ ensures r == nil && idx == int(old(s.GetCurrINF(ubuf))) ==> +// @ let oldPkt := old(s.absPkt(dp, ubuf)) in +// @ let newPkt := AbsSetInfoField(oldPkt, info.ToIntermediateAbsInfoField()) in +// @ s.absPkt(dp, ubuf) == newPkt // @ decreases -func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte @*/) (r error) { +func (s *Raw) SetInfoField(info path.InfoField, idx int /*@, ghost ubuf []byte, ghost dp io.DataPlaneSpec@*/) (r error) { //@ share info - //@ unfold acc(s.Mem(ubuf), R20) - //@ unfold acc(s.Base.Mem(), R20) + //@ ghost oldCurrINF := int(old(s.GetCurrINF(ubuf))) + //@ unfold acc(s.Mem(ubuf), R50) + //@ unfold acc(s.Base.Mem(), R50) if idx >= s.NumINF { err := serrors.New("InfoField index out of bounds", "max", s.NumINF-1, "actual", idx) - //@ fold acc(s.Base.Mem(), R20) - //@ fold acc(s.Mem(ubuf), R20) + //@ fold acc(s.Base.Mem(), R50) + //@ fold acc(s.Mem(ubuf), R50) return err } infOffset := MetaLen + idx*path.InfoLen - //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), writePerm) + //@ assert idx == oldCurrINF ==> reveal validPktMetaHdr(ubuf) + //@ assert idx == oldCurrINF ==> s.EqAbsHeader(ubuf) + + //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) + //@ ValidPktMetaHdrSublice(ubuf, len(s.Raw)) + //@ sl.SplitRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm) + //@ assert idx == oldCurrINF ==> RawBytesToBase(ubuf[:len(s.Raw)]).ValidCurrIdxsSpec() + //@ assert sl.AbsSlice_Bytes(s.Raw, 0, len(s.Raw)) - //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, writePerm) + //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) + //@ assert acc(sl.AbsSlice_Bytes(s.Raw, 0, infOffset), HalfPerm) + //@ sl.Reslice_Bytes(s.Raw, 0, infOffset, HalfPerm/2) + //@ ValidPktMetaHdrSublice(s.Raw, infOffset) + //@ sl.SplitRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm) + //@ assert idx == oldCurrINF ==> RawBytesToBase(s.Raw[:infOffset]).ValidCurrIdxsSpec() + ret := info.SerializeTo(s.Raw[infOffset : infOffset+path.InfoLen]) - //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, writePerm) - //@ 
sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), writePerm)
- //@ fold acc(s.Base.Mem(), R20)
- //@ fold acc(s.Mem(ubuf), R20)
+ //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm)
+ //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm)
+ //@ ValidPktMetaHdrSublice(ubuf, infOffset)
+
+ //@ sl.Unslice_Bytes(s.Raw, 0, infOffset, HalfPerm/2)
+ //@ sl.CombineRange_Bytes(s.Raw, infOffset, infOffset+path.InfoLen, HalfPerm)
+ //@ assert idx == oldCurrINF ==> RawBytesToBase(ubuf).ValidCurrIdxsSpec()
+ //@ sl.CombineRange_Bytes(ubuf, 0, len(s.Raw), HalfPerm)
+ //@ fold acc(s.Base.Mem(), R50)
+ //@ fold acc(s.Mem(ubuf), R50)
+ //@ assert idx == oldCurrINF ==> reveal validPktMetaHdr(ubuf)
+ //@ TemporaryAssumeForIO(idx == oldCurrINF ==> s.absPkt(dp, ubuf) == AbsSetInfoField(old(s.absPkt(dp, ubuf)), info.ToIntermediateAbsInfoField()))
 return ret
}
@@ -349,7 +412,7 @@ func (s *Raw) GetHopField(idx int /*@, ghost ubuf []byte @*/) (res path.HopField
// GetCurrentHopField is a convenience method that returns the current hop field pointed to by the
// CurrHF index in the path meta header.
// @ preserves acc(s.Mem(ubuf), R8)
-// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R1)
+// @ preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R2)
// @ ensures (r == nil) == (s.GetCurrHF(ubuf) < s.GetNumHops(ubuf))
// @ ensures r != nil ==> r.ErrorMem()
// @ decreases
@@ -418,12 +481,13 @@ func (s *Raw) IsPenultimateHop( /*@ ghost ubuf []byte @*/ ) bool {
}

// IsLastHop returns whether the current hop is the last hop on the path.
-// @ preserves acc(s.Mem(ubuf), R20)
+// @ preserves acc(s.Mem(ubuf), R40)
+// @ ensures res == s.IsLastHopSpec(ubuf)
// @ decreases
-func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) bool {
- //@ unfold acc(s.Mem(ubuf), R20)
- //@ defer fold acc(s.Mem(ubuf), R20)
- //@ unfold acc(s.Base.Mem(), R20)
- //@ defer fold acc(s.Base.Mem(), R20)
+func (s *Raw) IsLastHop( /*@ ghost ubuf []byte @*/ ) (res bool) {
+ //@ unfold acc(s.Mem(ubuf), R40)
+ //@ defer fold acc(s.Mem(ubuf), R40)
+ //@ unfold acc(s.Base.Mem(), R40)
+ //@ defer fold acc(s.Base.Mem(), R40)
 return int(s.PathMeta.CurrHF) == (s.NumHops - 1)
}
diff --git a/pkg/slayers/path/scion/raw_spec.gobra b/pkg/slayers/path/scion/raw_spec.gobra
index da1a0d05a..848f1d808 100644
--- a/pkg/slayers/path/scion/raw_spec.gobra
+++ b/pkg/slayers/path/scion/raw_spec.gobra
@@ -18,8 +18,10 @@ package scion

import (
 "github.com/scionproto/scion/pkg/slayers/path"
- . "github.com/scionproto/scion/verification/utils/definitions"
- sl "github.com/scionproto/scion/verification/utils/slices"
+ . "verification/utils/definitions"
+ sl "verification/utils/slices"
+ "verification/dependencies/encoding/binary"
+ "verification/io"
)

/**** Predicates ****/
@@ -91,8 +93,9 @@ func (s *Raw) IsFirstHopAfterXover(ghost ub []byte) (res bool) {
 * introduced this method, which acts as a wrapper.
*/ preserves acc(s.Mem(ub), R9) +ensures res == s.GetIsXoverSpec(ub) decreases -func (s *Raw) IsXover(ghost ub []byte) bool { +func (s *Raw) IsXover(ghost ub []byte) (res bool) { unfold acc(s.Mem(ub), R9) defer fold acc(s.Mem(ub), R9) return s.Base.IsXover() @@ -122,6 +125,32 @@ pure func (s *Raw) ValidCurrIdxs(ghost ub []byte) bool { s.Base.ValidCurrIdxs() } +ghost +requires acc(s.Mem(ub), _) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +decreases +pure func (s *Raw) EqAbsHeader(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + s.Base.EqAbsHeader(ub) +} + +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) GetIsXoverSpec(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in s.Base.IsXoverSpec() +} + +ghost +requires acc(s.Mem(ub), _) +decreases +pure func (s *Raw) InBounds(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + unfolding acc(s.Base.Mem(), _) in + s.PathMeta.InBounds() +} + /**** End of Stubs ****/ /**** Lemmas ****/ @@ -181,52 +210,6 @@ func (r *Raw) RawPerm(ubuf []byte, p perm) { } /******** End of Lemma: RawPerm ********/ -/******** Lemma: RawIdxPerm ********/ -pred (r *Raw) RawIdxPermRemainder(ubuf []byte, idx int, p perm) { - 0 < p && - acc(r.Base.Mem(), p/2) && - acc(&r.Raw, p/2) && - len(r.Raw) <= len(ubuf) && - r.Raw === ubuf[:len(r.Raw)] && - acc(sl.AbsSlice_Bytes(ubuf, idx, len(ubuf)), p) && - len(r.Raw) == r.Base.Len() && - idx <= len(r.Raw) -} - -ghost -requires 0 < p -requires acc(&r.Raw, p/2) -requires 0 <= idx && idx <= len(r.Raw) -requires acc(sl.AbsSlice_Bytes(r.Raw[:idx], 0, idx), p) && acc(r.Base.Mem(), p/2) -requires r.RawIdxPermRemainder(ubuf, idx, p) -ensures acc(r.Mem(ubuf), p) -ensures acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -decreases -func (r *Raw) UndoRawIdxPerm(ubuf []byte, idx int, p perm) { - unfold r.RawIdxPermRemainder(ubuf, idx, p) - sl.Unslice_Bytes(ubuf, 0, idx, p) - sl.CombineAtIndex_Bytes(ubuf, 0, len(ubuf), idx, p) - fold acc(r.Mem(ubuf), p) -} - -ghost -requires 0 < p -requires acc(r.Mem(ubuf), p) -requires acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), p) -requires 0 <= idx && idx <= unfolding acc(r.Mem(ubuf), p) in len(r.Raw) -ensures acc(&r.Raw, p/2) -ensures r.Raw === old(unfolding acc(r.Mem(ubuf), p) in r.Raw) -ensures acc(sl.AbsSlice_Bytes(r.Raw[:idx], 0, idx), p) && acc(r.Base.Mem(), p/2) -ensures r.RawIdxPermRemainder(ubuf, idx, p) -decreases -func (r *Raw) RawIdxPerm(ubuf []byte, idx int, p perm) { - unfold acc(r.Mem(ubuf), p) - sl.SplitByIndex_Bytes(ubuf, 0, len(ubuf), idx, p) - sl.Reslice_Bytes(ubuf, 0, idx, p) - fold r.RawIdxPermRemainder(ubuf, idx, p) -} -/******** End of Lemma: RawIdxPerm ********/ - /******** Lemma: RawRangePerm ********/ pred (r *Raw) RawRangePermRemainder(ubuf []byte, start, end int, p perm) { 0 < p && @@ -338,4 +321,349 @@ decreases func (s *Raw) RawBufferNonInitMem() []byte { return unfolding acc(s.NonInitMem(), _) in s.Raw } -/**** End of helpful pure functions ****/ \ No newline at end of file +/**** End of helpful pure functions ****/ + +ghost +decreases +pure func NumInfoFields(seg1Len int, seg2Len int, seg3Len int) int { + return seg3Len > 0 ? 3 : (seg2Len > 0 ? 
2 : 1) +} + +ghost +decreases +pure func HopFieldOffset(numINF int, currHF int, headerOffset int) int { + return path.InfoFieldOffset(numINF, headerOffset) + path.HopLen * currHF +} + +ghost +decreases +pure func pktLen(seg1Len int, seg2Len int, seg3Len int, headerOffset int) int { + return HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) + + path.HopLen * (seg1Len + seg2Len + seg3Len) +} + + +ghost +decreases +pure func LengthOfCurrSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) int { + return seg1Len > currHF ? seg1Len : ((seg1Len + seg2Len) > currHF ? seg2Len : seg3Len) +} + +ghost +requires 0 <= currHF +ensures res <= currHF +decreases +pure func LengthOfPrevSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) (res int) { + return seg1Len > currHF ? 0 : ((seg1Len + seg2Len) > currHF ? seg1Len : seg1Len + seg2Len) +} + +ghost +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures len(res) == segLen - currHFIdx +decreases segLen - currHFIdx +pure func hopFields( + raw []byte, + offset int, + currHFIdx int, + segLen int) (res seq[io.IO_HF]) { + return currHFIdx == segLen ? seq[io.IO_HF]{} : + let hf := path.BytesToIO_HF(raw, 0, offset + path.HopLen * currHFIdx, len(raw)) in + seq[io.IO_HF]{hf} ++ hopFields(raw, offset, currHFIdx + 1, segLen) +} + +ghost +requires -1 <= currHFIdx && currHFIdx < len(hopfields) +ensures len(res) == currHFIdx + 1 +decreases currHFIdx + 1 +pure func segPast(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_HF]) { + return currHFIdx == -1 ? + seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[currHFIdx]} ++ segPast(hopfields, currHFIdx - 1) +} + +ghost +requires 0 <= currHFIdx && currHFIdx <= len(hopfields) +ensures len(res) == len(hopfields) - currHFIdx +decreases len(hopfields) - currHFIdx +pure func segFuture(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_HF]) { + return currHFIdx == len(hopfields) ? seq[io.IO_HF]{} : + seq[io.IO_HF]{hopfields[currHFIdx]} ++ segFuture(hopfields, currHFIdx + 1) +} + +ghost +requires -1 <= currHFIdx && currHFIdx < len(hopfields) +ensures len(res) == currHFIdx + 1 +decreases currHFIdx + 1 +pure func segHistory(hopfields seq[io.IO_HF], currHFIdx int) (res seq[io.IO_ahi]) { + return currHFIdx == -1 ? 
seq[io.IO_ahi]{} : + seq[io.IO_ahi]{hopfields[currHFIdx].Toab()} ++ segHistory(hopfields, currHFIdx - 1) +} + +ghost +requires 0 <= offset +requires 0 < segLen +requires 0 <= currHFIdx && currHFIdx <= segLen +requires offset + path.HopLen * segLen <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures len(res.Future) == segLen - currHFIdx +ensures len(res.History) == currHFIdx +ensures len(res.Past) == currHFIdx +decreases +pure func segment(raw []byte, + offset int, + currHFIdx int, + ainfo io.IO_ainfo, + uinfo set[io.IO_msgterm], + consDir bool, + peer bool, + segLen int) (res io.IO_seg2) { + return let hopfields := hopFields(raw, offset, 0, segLen) in + io.IO_seg2(io.IO_seg3_{ + AInfo :ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields, currHFIdx - 1), + Future : segFuture(hopfields, currHFIdx), + History : segHistory(hopfields, currHFIdx - 1), + }) +} + +ghost +opaque +requires 0 <= headerOffset +requires path.InfoFieldOffset(currINFIdx, headerOffset) + path.InfoLen <= offset +requires 0 < segLen +requires offset + path.HopLen * segLen <= len(raw) +requires 0 <= currHFIdx && currHFIdx <= segLen +requires 0 <= currINFIdx && currINFIdx < 3 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func CurrSeg(raw []byte, + offset int, + currINFIdx int, + currHFIdx int, + segLen int, + headerOffset int) io.IO_seg3 { + return let ainfo := path.Timestamp(raw, currINFIdx, headerOffset) in + let consDir := path.ConsDir(raw, currINFIdx, headerOffset) in + let peer := path.Peer(raw, currINFIdx, headerOffset) in + let uinfo := path.AbsUinfo(raw, currINFIdx, headerOffset) in + segment(raw, offset, currHFIdx, ainfo, uinfo, consDir, peer, segLen) +} + +ghost +opaque +requires 0 <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires 1 <= currINFIdx && currINFIdx < 4 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func LeftSeg( + raw []byte, + currINFIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in + (currINFIdx == 1 && seg2Len > 0) ? + some(reveal CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, seg2Len, headerOffset)) : + ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? + some(reveal CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) : + none[io.IO_seg3]) +} + +ghost +opaque +requires 0 <= headerOffset +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw) +requires -1 <= currINFIdx && currINFIdx < 2 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func RightSeg( + raw []byte, + currINFIdx int, + seg1Len int, + seg2Len int, + seg3Len int, + headerOffset int) option[io.IO_seg3] { + return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in + (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ? + some(CurrSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, seg2Len, headerOffset)) : + (currINFIdx == 0 && seg2Len > 0) ? 
+ some(CurrSeg(raw, offset, currINFIdx, seg1Len, seg1Len, headerOffset)) :
+ none[io.IO_seg3]
+}
+
+ghost
+opaque
+requires 0 <= headerOffset
+requires 0 < seg1Len
+requires 0 <= seg2Len
+requires 0 <= seg3Len
+requires pktLen(seg1Len, seg2Len, seg3Len, headerOffset) <= len(raw)
+requires 2 <= currINFIdx && currINFIdx < 5
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func MidSeg(
+ raw []byte,
+ currINFIdx int,
+ seg1Len int,
+ seg2Len int,
+ seg3Len int,
+ headerOffset int) option[io.IO_seg3] {
+ return let offset := HopFieldOffset(NumInfoFields(seg1Len, seg2Len, seg3Len), 0, headerOffset) in
+ (currINFIdx == 4 && seg2Len > 0) ?
+ some(CurrSeg(raw, offset, 0, seg1Len, seg1Len, headerOffset)) :
+ ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ?
+ some(CurrSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, seg3Len, headerOffset)) :
+ none[io.IO_seg3])
+}
+
+ghost
+opaque
+requires dp.Valid()
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+requires validPktMetaHdr(raw)
+decreases
+pure func (s *Raw) absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) {
+ return let _ := reveal validPktMetaHdr(raw) in
+ let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in binary.BigEndian.Uint32(raw[:MetaLen])) in
+ let metaHdr := DecodedFrom(hdr) in
+ let currINFIdx := int(metaHdr.CurrINF) in
+ let currHFIdx := int(metaHdr.CurrHF) in
+ let seg1Len := int(metaHdr.SegLen[0]) in
+ let seg2Len := int(metaHdr.SegLen[1]) in
+ let seg3Len := int(metaHdr.SegLen[2]) in
+ let segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in
+ let prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in
+ let numINF := NumInfoFields(seg1Len, seg2Len, seg3Len) in
+ let offset := HopFieldOffset(numINF, 0, 0) in
+ io.IO_pkt2(io.IO_Packet2{
+ CurrSeg : CurrSeg(raw, offset + path.HopLen * prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, 0),
+ LeftSeg : LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len, seg3Len, 0),
+ MidSeg : MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len, seg3Len, 0),
+ RightSeg : RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len, seg3Len, 0),
+ })
+}
+
+// In the future, this should probably use AbsMetaHdr as
+// the return type.
+ghost
+requires MetaLen <= len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func RawBytesToMetaHdr(raw []byte) MetaHdr {
+ return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in
+ let hdr := binary.BigEndian.Uint32(raw[:MetaLen]) in
+ DecodedFrom(hdr)
+}
+
+// In the future, this should probably use AbsBase as
+// the return type.
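+// (A hypothetical one-line adaptation: return RawBytesToBase(raw).Abs(),
+// reusing Base.Abs from base_spec.gobra.)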
+ghost
+requires MetaLen <= len(raw)
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func RawBytesToBase(raw []byte) Base {
+ return let metaHdr := RawBytesToMetaHdr(raw) in
+ let seg1 := int(metaHdr.SegLen[0]) in
+ let seg2 := int(metaHdr.SegLen[1]) in
+ let seg3 := int(metaHdr.SegLen[2]) in
+ Base{metaHdr, NumInfoFields(seg1, seg2, seg3), seg1 + seg2 + seg3}
+}
+
+ghost
+opaque
+requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+decreases
+pure func validPktMetaHdr(raw []byte) bool {
+ return MetaLen <= len(raw) &&
+ let metaHdr := RawBytesToMetaHdr(raw) in
+ let seg1 := int(metaHdr.SegLen[0]) in
+ let seg2 := int(metaHdr.SegLen[1]) in
+ let seg3 := int(metaHdr.SegLen[2]) in
+ let base := RawBytesToBase(raw) in
+ 0 < metaHdr.SegLen[0] &&
+ base.ValidCurrIdxsSpec() &&
+ pktLen(seg1, seg2, seg3, 0) <= len(raw)
+}
+
+ghost
+requires MetaLen <= idx && idx <= len(raw)
+preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+preserves acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56)
+ensures RawBytesToMetaHdr(raw) == RawBytesToMetaHdr(raw[:idx])
+ensures RawBytesToBase(raw) == RawBytesToBase(raw[:idx])
+decreases
+func ValidPktMetaHdrSublice(raw []byte, idx int) {
+ reveal validPktMetaHdr(raw)
+ reveal validPktMetaHdr(raw[:idx])
+ unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+ unfold acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56)
+ assert forall i int :: { &raw[:MetaLen][i] } 0 <= i && i < MetaLen ==>
+ &raw[:MetaLen][i] == &raw[:idx][:MetaLen][i]
+ fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
+ fold acc(sl.AbsSlice_Bytes(raw[:idx], 0, idx), R56)
+}
+
+ghost
+decreases
+pure func AbsSetInfoField(oldPkt io.IO_pkt2, info path.IntermediateAbsInfoField) (newPkt io.IO_pkt2) {
+ return let newCurrSeg := io.IO_seg3(io.IO_seg3_{
+ info.AInfo,
+ info.UInfo,
+ info.ConsDir,
+ info.Peer,
+ oldPkt.CurrSeg.Past,
+ oldPkt.CurrSeg.Future,
+ oldPkt.CurrSeg.History}) in
+ io.IO_pkt2(io.IO_Packet2{newCurrSeg, oldPkt.LeftSeg, oldPkt.MidSeg, oldPkt.RightSeg})
+}
+
+ghost
+requires acc(s.Mem(ub), _)
+decreases
+pure func (s *Raw) IsLastHopSpec(ub []byte) bool {
+ return unfolding acc(s.Mem(ub), _) in
+ unfolding acc(s.Base.Mem(), _) in
+ int(s.PathMeta.CurrHF) == (s.NumHops - 1)
+}
+
+ghost
+preserves acc(s.Mem(ubuf), R55)
+preserves s.IsLastHopSpec(ubuf)
+preserves acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56)
+preserves dp.Valid()
+preserves validPktMetaHdr(ubuf)
+preserves s.EqAbsHeader(ubuf)
+ensures len(s.absPkt(dp, ubuf).CurrSeg.Future) == 1
+decreases
+func (s *Raw) LastHopLemma(ubuf []byte, dp io.DataPlaneSpec) {
+ reveal validPktMetaHdr(ubuf)
+ hdr := (unfolding acc(sl.AbsSlice_Bytes(ubuf, 0, len(ubuf)), R56) in
+ binary.BigEndian.Uint32(ubuf[:MetaLen]))
+ metaHdr := DecodedFrom(hdr)
+ currINFIdx := int(metaHdr.CurrINF)
+ currHFIdx := int(metaHdr.CurrHF)
+ seg1Len := int(metaHdr.SegLen[0])
+ seg2Len := int(metaHdr.SegLen[1])
+ seg3Len := int(metaHdr.SegLen[2])
+ segLen := LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len)
+ prevSegLen := LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len)
+ numINF := NumInfoFields(seg1Len, seg2Len, seg3Len)
+ offset := HopFieldOffset(numINF, 0, 0)
+ pkt := reveal s.absPkt(dp, ubuf)
+ assert pkt.CurrSeg == reveal CurrSeg(ubuf, offset + path.HopLen * prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, 0)
+ assert len(pkt.CurrSeg.Future) == 1
+}
\ No newline at end of file
diff --git a/pkg/slayers/scion.go b/pkg/slayers/scion.go
index 77f5d1aac..2f0a44735 100644
--- a/pkg/slayers/scion.go
+++ b/pkg/slayers/scion.go
@@ -320,6 +320,11 @@ func (s *SCION) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeO
// @ preserves acc(sl.AbsSlice_Bytes(data, 0, len(data)), R40)
// @ preserves df != nil && df.Mem()
// @ ensures res == nil ==> s.Mem(data)
+// @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==>
+// @ ValidPktMetaHdr(data)
+// @ ensures res == nil && typeOf(s.GetPath(data)) == *scion.Raw ==>
+// @ s.EqAbsHeader(data)
+// @ ensures res == nil ==> s.EqPathType(data)
// @ ensures res != nil ==> s.NonInitMem() && res.ErrorMem()
// @ decreases
func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res error) {
@@ -424,6 +429,9 @@ func (s *SCION) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) (res er
 // @ sl.CombineRange_Bytes(data, offset, offset+pathLen, R40)
 // @ fold s.Mem(data)
+ // @ TemporaryAssumeForIO(typeOf(s.GetPath(data)) == *scion.Raw ==> ValidPktMetaHdr(data))
+ // @ TemporaryAssumeForIO(typeOf(s.GetPath(data)) == *scion.Raw ==> s.EqAbsHeader(data))
+ // @ TemporaryAssumeForIO(s.EqPathType(data))
 return nil
}
diff --git a/pkg/slayers/scion_spec.gobra b/pkg/slayers/scion_spec.gobra
index 632535619..01bd159e6 100644
--- a/pkg/slayers/scion_spec.gobra
+++ b/pkg/slayers/scion_spec.gobra
@@ -27,8 +27,9 @@ import (
 "github.com/scionproto/scion/pkg/slayers/path/onehop"
 "github.com/scionproto/scion/pkg/slayers/path/scion"

- . "github.com/scionproto/scion/verification/utils/definitions"
- "github.com/scionproto/scion/verification/utils/slices"
+ . "verification/utils/definitions"
+ sl "verification/utils/slices"
+ "encoding/binary"
)

pred PathPoolMem(pathPool []path.Path, pathPoolRaw path.Path) {
@@ -231,8 +232,8 @@ pred (s *SCION) ChecksumMem() {
 acc(&s.RawSrcAddr) && acc(&s.RawDstAddr) &&
 len(s.RawSrcAddr) % 2 == 0 && len(s.RawDstAddr) % 2 == 0 &&
 acc(&s.SrcIA) && acc(&s.DstIA) &&
- slices.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) &&
- slices.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr))
+ sl.AbsSlice_Bytes(s.RawSrcAddr, 0, len(s.RawSrcAddr)) &&
+ sl.AbsSlice_Bytes(s.RawDstAddr, 0, len(s.RawDstAddr))
}

pred (b *BaseLayer) Mem(ghost ub []byte, ghost breakPoint int) {
@@ -349,6 +350,116 @@ func (s *SCION) GetPath(ub []byte) path.Path {
 return unfolding acc(s.Mem(ub), _) in s.Path
}

+ghost
+opaque
+pure
+requires acc(s.Mem(ub), _)
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _)
+decreases
+func (s *SCION) EqAbsHeader(ub []byte) bool {
+ return unfolding acc(s.Mem(ub), _) in
+ let low := CmnHdrLen+s.AddrHdrLenSpecInternal() in
+ let high := s.HdrLen*LineLen in
+ GetAddressOffset(ub) == low &&
+ GetLength(ub) == int(high) &&
+ // Might be worth introducing EqAbsHeader as an interface method on Path
+ // to avoid doing these casts, especially when we add support for EPIC.
+ typeOf(s.Path) == (*scion.Raw) &&
+ unfolding acc(s.Path.Mem(ub[low:high]), _) in
+ unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in
+ let _ := Asserting(forall k int :: {&ub[low:high][k]} 0 <= k && k < int(high) - low ==>
+ &ub[low:high][k] == &ub[low + k]) in
+ let _ := Asserting(forall k int :: {&ub[low:high][:scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==>
+ &ub[low:high][:scion.MetaLen][k] == &ub[low:high][k]) in
+ s.Path.(*scion.Raw).Base.GetMetaHdr() ==
+ scion.DecodedFrom(binary.BigEndian.Uint32(ub[low:high][:scion.MetaLen]))
+}
+
+// Checks whether the path meta header in the serialized SCION packet is valid.
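+// Informally: the address header determines where the path meta header starts,
+// that header must decode to in-bounds indices and segment lengths (InBounds,
+// ValidCurrIdxsSpec), and the resulting pktLen must fit within the length
+// recorded in the common header.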
+ghost +opaque +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func ValidPktMetaHdr(raw []byte) bool { + return CmnHdrLen <= len(raw) && + let start := GetAddressOffset(raw) in + let end := start+scion.MetaLen in + 0 <= start && end <= len(raw) && + let rawHdr := raw[start:end] in + let length := GetLength(raw) in + length <= len(raw) && + unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + let _ := Asserting(forall k int :: {&rawHdr[k]} 0 <= k && k < scion.MetaLen ==> &rawHdr[k] == &raw[start + k]) in + let hdr := binary.BigEndian.Uint32(rawHdr) in + let metaHdr := scion.DecodedFrom(hdr) in + let seg1 := int(metaHdr.SegLen[0]) in + let seg2 := int(metaHdr.SegLen[1]) in + let seg3 := int(metaHdr.SegLen[2]) in + let base := scion.Base{metaHdr, scion.NumInfoFields(seg1, seg2, seg3), seg1+seg2+seg3} in + metaHdr.InBounds() && + 0 < metaHdr.SegLen[0] && + base.ValidCurrIdxsSpec() && + scion.pktLen(seg1, seg2, seg3, start) <= length +} + +ghost +opaque +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +decreases +pure func IsSupportedPkt(raw []byte) bool { + return CmnHdrLen <= len(raw) && + let pathType := path.Type(GetPathType(raw)) in + let nextHdr := L4ProtocolType(GetNextHdr(raw)) in + pathType == scion.PathType && + nextHdr != L4SCMP +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetAddressOffset(ub []byte) int { + return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in + let dstAddrLen := AddrType(ub[9] >> 4 & 0x7).Length() in + let srcAddrLen := AddrType(ub[9] & 0x7).Length() in + CmnHdrLen + 2*addr.IABytes + dstAddrLen + srcAddrLen +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetLength(ub []byte) int { + return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[5])*LineLen +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetPathType(ub []byte) int { + return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[8]) +} + +ghost +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +requires CmnHdrLen <= len(ub) +decreases +pure func GetNextHdr(ub []byte) int { + return unfolding acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) in int(ub[4]) +} + +ghost +opaque +requires acc(s.Mem(ub), _) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), _) +decreases +pure func (s *SCION) EqPathType(ub []byte) bool { + return unfolding acc(s.Mem(ub), _) in + path.Type(GetPathType(ub)) == s.PathType && + L4ProtocolType(GetNextHdr(ub)) == s.NextHdr +} + ghost pure requires acc(s.Mem(ub), _) diff --git a/router/dataplane.go b/router/dataplane.go index 4c9391ac5..2e69a0c3a 100644 --- a/router/dataplane.go +++ b/router/dataplane.go @@ -137,11 +137,10 @@ type BatchConn interface { // @ ensures err != nil ==> prophecyM == 0 // @ ensures err == nil ==> prophecyM == n // @ ensures io.token(old(MultiReadBioNext(place, prophecyM))) - // @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, ifsToIO_ifs(ingressID))) + // @ ensures old(MultiReadBioCorrectIfs(place, prophecyM, path.ifsToIO_ifs(ingressID))) // @ ensures err == nil ==> - // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> - // @ unfolding acc(msgs[i].Mem(), _) in absIO_val(dp, msgs[i].Buffers[0], ingressID) == - // @ old(MultiReadBioIO_val(place, n)[i]) + // @ forall i int :: { &msgs[i] } 0 <= i && i < n ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == old(MultiReadBioIO_val(place, n)[i]) 
// TODO (Markus): uint16 or option[io.IO_ifs] for ingress ReadBatch(msgs underlayconn.Messages /*@, ghost ingressID uint16, ghost prophecyM int, ghost place io.Place, ghost dp io.DataPlaneSpec @*/) (n int, err error) // @ requires acc(addr.Mem(), _) @@ -154,10 +153,19 @@ type BatchConn interface { // (VerifiedSCION) opted for less reusable spec for WriteBatch for // performance reasons. // @ requires len(msgs) == 1 - // @ preserves acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // @ requires acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() + // @ ensures acc(msgs[0].Mem(), R50) && msgs[0].HasActiveAddr() // @ ensures err == nil ==> 0 <= n && n <= len(msgs) // @ ensures err != nil ==> err.ErrorMem() - WriteBatch(msgs underlayconn.Messages, flags int) (n int, err error) + // contracts for IO-spec + // @ requires dp.Valid() + // @ requires MsgToAbsVal(dp, &msgs[0], egressID) == ioAbsPkts + // @ requires io.token(place) && io.CBioIO_bio3s_send(place, ioAbsPkts) + // @ ensures dp.Valid() + // (VerifiedSCION) the permission to the protocol must always be returned, otherwise the router could not continue + // after failing to send a packet. + // @ ensures io.token(old(io.dp3s_iospec_bio3s_send_T(place, ioAbsPkts))) + WriteBatch(msgs underlayconn.Messages, flags int /*@, ghost egressID uint16, ghost place io.Place, ghost ioAbsPkts io.IO_val, ghost dp io.DataPlaneSpec @*/) (n int, err error) // @ requires Mem() // @ ensures err != nil ==> err.ErrorMem() // @ decreases @@ -744,6 +752,7 @@ func (d *DataPlane) AddNextHopBFD(ifID uint16, src, dst *net.UDPAddr, cfg contro // @ requires dp.Valid() // @ requires d.DpAgreesWithSpec(dp) // @ requires io.token(place) && dp.dp3s_iospec_ordered(state, place) +// @ #backend[moreJoins()] func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost state io.IO_dp3s_state_local, ghost dp io.DataPlaneSpec @*/) error { // @ share d, ctx d.mtx.Lock() @@ -803,8 +812,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ requires let d := *dPtr in // @ d.DpAgreesWithSpec(dp) // @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ #backend[moreJoins()] func /*@ rc @*/ (ingressID uint16, rd BatchConn, dPtr **DataPlane /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) { - // @ ghost ioIngressID := ifsToIO_ifs(ingressID) d := *dPtr msgs := conn.NewReadMessages(inputBatchCnt) // @ requires forall i int :: { &msgs[i] } 0 <= i && i < len(msgs) ==> @@ -867,8 +876,11 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant ingressID in d.getDomForwardingMetrics() // @ invariant acc(rd.Mem(), _) // @ invariant processor.sInit() && processor.sInitD() === d + // @ invariant processor.getIngressID() == ingressID // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !> + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() for d.running { + // @ ghost ioIngressID := path.ifsToIO_ifs(ingressID) // Multi recv event // @ ghost ioLock.Lock() // @ unfold SharedInv!< dp, ioSharedArg !>() @@ -876,14 +888,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ ghost numberOfReceivedPacketsProphecy := AllocProphecy() // @ ExtractMultiReadBio(dp, t, numberOfReceivedPacketsProphecy, s) // @ MultiUpdateElemWitness(t, numberOfReceivedPacketsProphecy, ioIngressID, s, ioSharedArg) - // @ ghost ioValSeq := 
MultiReadBioIO_val(t,numberOfReceivedPacketsProphecy) + // @ ghost ioValSeq := MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy) // @ ghost sN := MultiReadBioUpd(t, numberOfReceivedPacketsProphecy, s) // @ ghost tN := MultiReadBioNext(t, numberOfReceivedPacketsProphecy) // @ assert dp.dp3s_iospec_ordered(sN, tN) + // @ BeforeReadBatch: pkts, err := rd.ReadBatch(msgs /*@, ingressID, numberOfReceivedPacketsProphecy, t , dp @*/) + // @ assert old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)) == ioValSeq + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ ioValSeq[i] == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] // @ ghost *ioSharedArg.State = sN // @ ghost *ioSharedArg.Place = tN + // @ assert err == nil ==> + // @ forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == old[BeforeReadBatch](MultiReadBioIO_val(t, numberOfReceivedPacketsProphecy)[i]) // @ MultiElemWitnessConv(ioSharedArg.IBufY, ioIngressID, ioValSeq) // @ fold SharedInv!< dp, ioSharedArg !>() // @ ioLock.Unlock() @@ -905,6 +927,8 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ !msgs[i].HasWildcardPermAddr() // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) + // @ assert forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] // (VerifiedSCION) using regular for loop instead of range loop to avoid unnecessary // complications with permissions @@ -927,6 +951,15 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ invariant forall i int :: { &msgs[i] } 0 <= i && i < pkts ==> // @ msgs[i].GetN() <= len(msgs[i].GetFstBuffer()) // @ invariant processor.sInit() && processor.sInitD() === d + // @ invariant processor.getIngressID() == ingressID + // contracts for IO-spec + // @ invariant pkts <= len(ioValSeq) + // @ invariant d.DpAgreesWithSpec(dp) && dp.Valid() + // @ invariant ioIngressID == path.ifsToIO_ifs(ingressID) + // @ invariant acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; + // @ invariant forall i int :: { &msgs[i] } i0 <= i && i < pkts ==> + // @ MsgToAbsVal(dp, &msgs[i], ingressID) == ioValSeq[i] + // @ invariant MultiElemWitnessWithIndex(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) // @ decreases pkts - i0 for i0 := 0; i0 < pkts; i0++ { // @ assert &msgs[:pkts][i0] == &msgs[i0] @@ -959,12 +992,24 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta // @ assert p.Buffers === m.Buffers // @ assert acc(&p.Buffers[0]) // @ assert p.N <= len(p.Buffers[0]) - // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, writePerm) + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) tmpBuf := p.Buffers[0][:p.N] + // @ ghost absPktTmpBuf := absIO_val(dp, tmpBuf, ingressID) + // @ ghost absPktBuf0 := absIO_val(dp, msgs[i0].Buffers[0], ingressID) + // @ assert msgs[i0] === p + // @ absIO_valWidenLemma(dp, p.Buffers[0], ingressID, p.N) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf === absPktBuf0 + // @ MultiElemWitnessStep(ioSharedArg.IBufY, ioIngressID, ioValSeq, i0) + // @ assert ioValSeq[i0].isIO_val_Pkt2 ==> + // @ ElemWitness(ioSharedArg.IBufY, ioIngressID, 
ioValSeq[i0].IO_val_Pkt2_2) + // @ assert absPktTmpBuf.isIO_val_Pkt2 ==> absPktTmpBuf == ioValSeq[i0] + // @ assert path.ifsToIO_ifs(processor.getIngressID()) == ioIngressID + // @ sl.SplitRange_Bytes(p.Buffers[0], 0, p.N, HalfPerm) // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, p.N) // @ assert sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)) - result, err /*@ , addrAliasesPkt @*/ := processor.processPkt(tmpBuf, srcAddr) + result, err /*@ , addrAliasesPkt, newAbsPkt @*/ := processor.processPkt(tmpBuf, srcAddr /*@, ioLock, ioSharedArg, dp @*/) // @ fold scmpErr.Mem() + switch { case err == nil: // @ unfold scmpErr.Mem() @@ -1012,6 +1057,13 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta continue } + // (VerifiedSCION) we currently have this assumption because we cannot think of a sound way to capture + // the behaviour of errors.As(...) in our specifications. Nonetheless, we checked extensively that, when + // processPkt does not return an error or returns an scmpError (and thus errors.As(err, &scmpErr) succeeds), + // result.OutPkt is always non-nil. For the other kinds of errors, the result is nil, but that branch is killed + // before this point. + // @ assume result.OutPkt != nil + // Write to OutConn; drop the packet if this would block. // Use WriteBatch because it's the only available function that // supports MSG_DONTWAIT. @@ -1025,8 +1077,26 @@ func (d *DataPlane) Run(ctx context.Context /*@, ghost place io.Place, ghost sta writeMsgs[0].Addr = result.OutAddr } // @ sl.NilAcc_Bytes() + // @ assert absIO_val(dp, result.OutPkt, result.EgressID) == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID) + // @ assert result.OutPkt != nil ==> newAbsPkt == absIO_val(dp, writeMsgs[0].Buffers[0], result.EgressID) // @ fold acc(writeMsgs[0].Mem(), R50) - _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT) + + // @ ghost ioLock.Lock() + // @ unfold SharedInv!< dp, ioSharedArg !>() + // @ ghost t, s := *ioSharedArg.Place, *ioSharedArg.State + // @ ghost if(newAbsPkt.isIO_val_Pkt2) { + // @ ApplyElemWitness(s.obuf, ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) + // @ assert newAbsPkt.IO_val_Pkt2_2 in AsSet(s.obuf[newAbsPkt.IO_val_Pkt2_1]) + // @ assert dp.dp3s_iospec_bio3s_send_guard(s, t, newAbsPkt) + // @ } else { assert newAbsPkt.isIO_val_Unsupported } + // @ unfold dp.dp3s_iospec_ordered(s, t) + // @ unfold dp.dp3s_iospec_bio3s_send(s, t) + // @ io.TriggerBodyIoSend(newAbsPkt) + // @ ghost tN := io.dp3s_iospec_bio3s_send_T(t, newAbsPkt) + _, err = result.OutConn.WriteBatch(writeMsgs, syscall.MSG_DONTWAIT /*@, result.EgressID, t, newAbsPkt, dp @*/) + // @ ghost *ioSharedArg.Place = tN + // @ fold SharedInv!< dp, ioSharedArg !>() + // @ ghost ioLock.Unlock() // @ unfold acc(writeMsgs[0].Mem(), R50) // @ ghost if addrAliasesPkt && result.OutAddr != nil { // @ apply acc(result.OutAddr.Mem(), R15) --* acc(sl.AbsSlice_Bytes(tmpBuf, 0, len(tmpBuf)), R15) @@ -1293,7 +1363,7 @@ type processResult struct { } // @ requires acc(d.Mem(), _) && d.getMacFactory() != nil -// @ ensures res.sInit() && res.sInitD() == d +// @ ensures res.sInit() && res.sInitD() == d && res.getIngressID() == ingressID // @ decreases func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcessor) { var verScionTmp gopacket.SerializeBuffer @@ -1322,6 +1392,7 @@ func newPacketProcessor(d *DataPlane, ingressID uint16) (res *scionPacketProcess // @ preserves p.sInit() // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == 
old(p.getIngressID()) // @ ensures p.sInitRawPkt() == nil // @ ensures p.sInitPath() == nil // @ ensures p.sInitHopField() == path.HopField{} @@ -1353,10 +1424,12 @@ func (p *scionPacketProcessor) reset() (err error) { // @ acc(d.Mem(), _) && // @ d.WellConfigured() && // @ d.getValSvc() != nil && -// @ d.getValForwardingMetrics() != nil +// @ d.getValForwardingMetrics() != nil && +// @ d.DpAgreesWithSpec(dp) // @ ensures p.sInit() // @ ensures acc(p.sInitD().Mem(), _) // @ ensures p.sInitD() == old(p.sInitD()) +// @ ensures p.getIngressID() == old(p.getIngressID()) // @ ensures p.sInitD().validResult(respr, addrAliasesPkt) // @ ensures acc(sl.AbsSlice_Bytes(rawPkt, 0, len(rawPkt)), 1 - R15) // @ ensures addrAliasesPkt ==> ( @@ -1366,16 +1439,30 @@ func (p *scionPacketProcessor) reset() (err error) { // @ ensures respr.OutPkt !== rawPkt && respr.OutPkt != nil ==> // @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires let absPkt := absIO_val(dp, rawPkt, p.getIngressID()) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.getIngressID()), absPkt.IO_val_Pkt2_2) +// @ ensures dp.Valid() +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures reserr != nil && respr.OutPkt != nil ==> newAbsPkt.isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() +// @ #backend[moreJoins(1)] func (p *scionPacketProcessor) processPkt(rawPkt []byte, - srcAddr *net.UDPAddr) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { + srcAddr *net.UDPAddr /*@, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { if err := p.reset(); err != nil { // @ fold p.sInitD().validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, io.IO_val_Unit{} @*/ } // @ assert p.sInitD().getValForwardingMetrics() != nil // @ unfold p.sInit() + // @ assert !p.segmentChange // @ ghost d := p.d p.rawPkt = rawPkt p.srcAddr = srcAddr @@ -1389,7 +1476,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, if err != nil { // @ fold p.sInit() // @ fold p.sInitD().validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, io.IO_val_Unit{} @*/ } /*@ ghost var ub []byte @@ -1438,7 +1525,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm) - return processResult{}, p.processIntraBFD(pld) /*@, false @*/ + return processResult{}, p.processIntraBFD(pld) /*@, false, io.IO_val_Unit{} @*/ } // @ establishMemUnsupportedPathTypeNextHeader() // @ defer fold p.sInit() @@ -1446,7 +1533,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) // @ ghost defer 
sl.CombineRange_Bytes(ub, start, end, writePerm) return processResult{}, serrors.WithCtx(unsupportedPathTypeNextHeader, - "type", pathType, "header", nextHdr(p.lastLayer /*@, ub @*/)) /*@, false @*/ + "type", pathType, "header", nextHdr(p.lastLayer /*@, ub @*/)) /*@, false, io.IO_val_Unit{} @*/ case onehop.PathType: if p.lastLayer.NextLayerType( /*@ ub @*/ ) == layers.LayerTypeBFD { // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) } @@ -1459,12 +1546,12 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) - return processResult{}, malformedPath /*@, false @*/ + return processResult{}, malformedPath /*@, false, io.IO_val_Unit{} @*/ } // @ defer fold p.sInit() // @ defer fold p.d.validResult(processResult{}, false) // @ ghost defer ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) - return processResult{}, p.processInterBFD(ohp, pld) /*@, false @*/ + return processResult{}, p.processInterBFD(ohp, pld) /*@, false, io.IO_val_Unit{} @*/ } // @ sl.CombineRange_Bytes(ub, start, end, writePerm) // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil { @@ -1473,10 +1560,12 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ } // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) // @ unfold acc(p.d.Mem(), _) - v1, v2 /*@, aliasesPkt @*/ := p.processOHP() + // @ TemporaryAssumeForIO(reveal p.scionLayer.EqPathType(p.rawPkt)) + // @ assert !(reveal slayers.IsSupportedPkt(p.rawPkt)) + v1, v2 /*@, aliasesPkt, newAbsPkt @*/ := p.processOHP( /* @ dp @ */ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, aliasesPkt @*/ + return v1, v2 /*@, aliasesPkt, newAbsPkt @*/ case scion.PathType: // @ sl.CombineRange_Bytes(ub, start, end, writePerm) // @ ghost if lastLayerIdx >= 0 && !offsets[lastLayerIdx].isNil { @@ -1484,15 +1573,22 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) // @ } // @ assert sl.AbsSlice_Bytes(p.rawPkt, 0, len(p.rawPkt)) - v1, v2 /*@ , addrAliasesPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd @*/ ) + // (VerifiedSCION) the following statements assume properties that follow directly + // from `decodeLayers`, but we cannot currently establish them because we cannot + // properly frame this yet around calls to the ghost slice operations. 
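+	// Concretely: ValidPktMetaHdr re-decodes the path meta header from p.rawPkt and
+	// bounds-checks its SegLen fields, EqAbsHeader states that the parsed
+	// p.scionLayer agrees with those same bytes, and EqPathType pins the path type
+	// and next header. Since decodeLayers only reads the buffer, these facts are
+	// expected to hold once the framing issue above is resolved.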
+ // @ TemporaryAssumeForIO((typeOf(p.scionLayer.GetPath(p.rawPkt)) == *scion.Raw) ==> slayers.ValidPktMetaHdr(p.rawPkt)) + // @ TemporaryAssumeForIO((typeOf(p.scionLayer.GetPath(p.rawPkt)) == *scion.Raw) ==> p.scionLayer.EqAbsHeader(p.rawPkt)) + // @ TemporaryAssumeForIO(p.scionLayer.EqPathType(p.rawPkt)) + // @ TemporaryAssumeForIOWitness(absIO_val(dp, p.rawPkt, p.ingressID), p.ingressID, ioSharedArg) + v1, v2 /*@ , addrAliasesPkt, newAbsPkt @*/ := p.processSCION( /*@ p.rawPkt, ub == nil, llStart, llEnd, ioLock, ioSharedArg, dp @*/ ) // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, v2 == nil, hasHbhLayer, hasE2eLayer) // @ fold p.sInit() - return v1, v2 /*@, addrAliasesPkt @*/ + return v1, v2 /*@, addrAliasesPkt, newAbsPkt @*/ case epic.PathType: // @ TODO() v1, v2 := p.processEPIC() // @ fold p.sInit() - return v1, v2 /*@, false @*/ + return v1, v2 /*@, false, io.IO_val_Unit{} @*/ default: // @ ghost if mustCombineRanges { ghost defer sl.CombineRange_Bytes(p.rawPkt, o.start, o.end, writePerm) } // @ ResetDecodingLayers(&p.scionLayer, &p.hbhLayer, &p.e2eLayer, ubScionLayer, ubHbhLayer, ubE2eLayer, true, hasHbhLayer, hasE2eLayer) @@ -1500,7 +1596,7 @@ func (p *scionPacketProcessor) processPkt(rawPkt []byte, // @ fold p.d.validResult(processResult{}, false) // @ fold p.sInit() // @ establishMemUnsupportedPathType() - return processResult{}, serrors.WithCtx(unsupportedPathType, "type", pathType) /*@, false @*/ + return processResult{}, serrors.WithCtx(unsupportedPathType, "type", pathType) /*@, false, io.IO_val_Unit{} @*/ } } @@ -1617,6 +1713,7 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ requires acc(&p.path) // @ requires p.scionLayer.Mem(ub) // @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -1624,14 +1721,15 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ acc(p.lastLayer.Mem(nil), R10) // @ preserves (p.lastLayer !== &p.scionLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R10) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.ingressID, R20) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path) // @ ensures acc(&p.rawPkt, R1) @@ -1646,8 +1744,24 @@ func (p *scionPacketProcessor) processIntraBFD(data []byte) (res error) { // @ ensures respr.OutPkt !== ub && respr.OutPkt != nil ==> // @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> slayers.ValidPktMetaHdr(ub) +// @ requires (typeOf(p.scionLayer.GetPath(ub)) == *scion.Raw) ==> p.scionLayer.EqAbsHeader(ub) +// @ requires p.scionLayer.EqPathType(ub) +// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires let absPkt := absIO_val(dp, 
p.rawPkt, p.ingressID) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ newAbsPkt.isIO_val_Unsupported +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { +func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { var ok bool // @ unfold acc(p.scionLayer.Mem(ub), R20) @@ -1658,9 +1772,9 @@ func (p *scionPacketProcessor) processSCION( /*@ ghost ub []byte, ghost llIsNil // @ p.scionLayer.DowngradePerm(ub) // @ establishMemMalformedPath() // @ fold p.d.validResult(processResult{}, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, io.IO_val_Unit{} @*/ } - return p.process( /*@ ub, llIsNil, startLL, endLL @*/ ) + return p.process( /*@ ub, llIsNil, startLL, endLL , ioLock, ioSharedArg, dp @*/ ) } // @ trusted @@ -1766,13 +1880,17 @@ type macBuffersT struct { } // @ trusted -// @ requires false +// @ requires false +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported func (p *scionPacketProcessor) packSCMP( typ slayers.SCMPType, code slayers.SCMPCode, scmpP gopacket.SerializableLayer, cause error, -) (processResult, error) { + /* @ ghost dp io.DataPlaneSpec, @*/ +) (respr processResult, reserr error) { // check invoking packet was an SCMP error: if p.lastLayer.NextLayerType() == slayers.LayerTypeSCMP { @@ -1795,7 +1913,8 @@ func (p *scionPacketProcessor) packSCMP( // @ requires acc(&p.path, R20) // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.hopField) && acc(&p.infoField) -// @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) +// @ requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) +// @ ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R1) // @ ensures acc(&p.d, R50) // @ ensures acc(p.scionLayer.Mem(ub), R6) // @ ensures acc(&p.path, R20) @@ -1813,16 +1932,26 @@ func (p *scionPacketProcessor) packSCMP( // @ unfolding acc(p.scionLayer.Mem(ub), R10) in // @ p.path.GetCurrINF(ubPath) < p.path.GetNumINF(ubPath)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures dp.Valid() +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures respr.OutPkt == nil // @ decreases -func (p *scionPacketProcessor) 
parsePath( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { var err error // @ unfold acc(p.scionLayer.Mem(ub), R6) // @ defer fold acc(p.scionLayer.Mem(ub), R6) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) // @ ghost ubPath := ub[startP:endP] - // @ sl.SplitRange_Bytes(ub, startP, endP, R1) - // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, R1) + // @ sl.SplitRange_Bytes(ub, startP, endP, R2) + // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, R2) p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ) // @ fold p.d.validResult(processResult{}, false) if err != nil { @@ -1834,6 +1963,10 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // TODO(lukedirtwalker) parameter problem invalid path? return processResult{}, err } + // @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub)) + // @ TemporaryAssumeForIO(len(absPkt(dp, ub).CurrSeg.Future) > 0) + // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) + // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub))) return processResult{}, nil } @@ -1844,8 +1977,12 @@ func (p *scionPacketProcessor) parsePath( /*@ ghost ub []byte @*/ ) (respr proce // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr error) { +func (p *scionPacketProcessor) validateHopExpiry( /*@ ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { expiration := util.SecsToTime(p.infoField.Timestamp). 
Add(path.ExpTimeToDuration(p.hopField.ExpTime)) expired := expiration.Before(time.Now()) @@ -1870,13 +2007,17 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, serrors.New("expired hop", "cons_dir", p.infoField.ConsDir, "if_id", p.ingressID, "curr_inf", p.path.PathMeta.CurrINF, "curr_hf", p.path.PathMeta.CurrHF), + /*@ dp, @*/ ) } -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.hopField, R20) +// @ requires acc(&p.infoField, R20) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) +// @ ensures acc(&p.ingressID, R21) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() @@ -1884,8 +2025,16 @@ func (p *scionPacketProcessor) validateHopExpiry() (respr processResult, reserr // @ p.ingressID == 0 || p.hopField.ConsIngress == p.ingressID) // @ ensures reserr == nil && !p.infoField.ConsDir ==> ( // @ p.ingressID == 0 || p.hopField.ConsEgress == p.ingressID) +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ ensures reserr == nil ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr error) { +func (p *scionPacketProcessor) validateIngressID( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { pktIngressID := p.hopField.ConsIngress errCode := slayers.SCMPCodeUnknownHopFieldIngress if !p.infoField.ConsDir { @@ -1900,8 +2049,12 @@ func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, serrors.New("ingress interface invalid", "pkt_ingress", pktIngressID, "router_ingress", p.ingressID), + /*@ dp, @*/ ) } + // @ reveal p.EqAbsHopField(oldPkt) + // @ reveal p.EqAbsInfoField(oldPkt) + // @ assert reveal AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) // @ fold p.d.validResult(respr, false) return processResult{}, nil } @@ -1910,19 +2063,33 @@ func (p *scionPacketProcessor) validateIngressID() (respr processResult, reserr // @ requires acc(p.scionLayer.Mem(ubScionL), R19) // @ requires acc(&p.path, R20) // @ requires p.path === p.scionLayer.GetPath(ubScionL) -// @ preserves acc(&p.ingressID, R20) +// @ preserves acc(&p.ingressID, R21) // @ ensures acc(p.scionLayer.Mem(ubScionL), R19) // @ ensures acc(&p.path, R20) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20) +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ubScionL) && p.scionLayer.EqAbsHeader(ubScionL) +// @ ensures 
acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R20) +// @ ensures reserr == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ubScionL) +// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ubScionL) +// @ ensures reserr == nil ==> p.LastHopLen(ubScionL, dp) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) { - // @ ghost ubPath := p.scionLayer.UBPath(ubScionL) +func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { // @ unfold acc(p.scionLayer.Mem(ubScionL), R20) // @ defer fold acc(p.scionLayer.Mem(ubScionL), R20) + // @ ghost startP := p.scionLayer.PathStartIdx(ubScionL) + // @ ghost endP := p.scionLayer.PathEndIdx(ubScionL) + // @ ghost ubPath := ubScionL[startP:endP] + // @ sl.SplitRange_Bytes(ubScionL, startP, endP, R55) + // @ p.AbsPktToSubSliceAbsPkt(ubScionL, startP, endP, dp) + // @ ghost defer sl.CombineRange_Bytes(ubScionL, startP, endP, R55) // @ unfold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) // @ defer fold acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R20) // @ p.d.getLocalIA() @@ -1935,48 +2102,65 @@ func (p *scionPacketProcessor) validateSrcDstIA( /*@ ghost ubScionL []byte @*/ ) // don't start with the first hop. if p.path.IsFirstHop( /*@ ubPath @*/ ) && !srcIsLocal { // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.invalidSrcIA() + return p.invalidSrcIA( /*@ dp @*/ ) } if dstIsLocal { // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.invalidDstIA() + return p.invalidDstIA( /*@ dp @*/ ) } } else { // Inbound if srcIsLocal { // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.invalidSrcIA() + return p.invalidSrcIA( /*@ dp @*/ ) } if p.path.IsLastHop( /*@ ubPath @*/ ) != dstIsLocal { // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.invalidDstIA() + return p.invalidDstIA( /*@ dp @*/ ) } + // @ ghost if(p.path.IsLastHopSpec(ubPath)) { + // @ p.path.LastHopLemma(ubPath, dp) + // @ p.SubSliceAbsPktToAbsPkt(ubScionL, startP, endP, dp) + // @ } } // @ fold p.d.validResult(processResult{}, false) + + // @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in + // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in + // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0 + // @ assert (unfolding acc(p.scionLayer.Mem(ubScionL), R55) in + // @ (unfolding acc(p.scionLayer.HeaderMem(ubScionL[slayers.CmnHdrLen:]), R55) in + // @ p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.path.IsLastHopSpec(ubPath) + // @ assert reveal p.DstIsLocalIngressID(ubScionL) + // @ assert reveal p.LastHopLen(ubScionL, dp) return processResult{}, nil } // invalidSrcIA is a helper to return an SCMP error for an invalid SrcIA. 
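+// It is marked trusted with `requires false` because it relies on packSCMP,
+// which is not yet verified; its call sites are guarded accordingly by
+// ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482").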
// @ trusted +// @ requires dp.Valid() // @ requires false -func (p *scionPacketProcessor) invalidSrcIA() (processResult, error) { +func (p *scionPacketProcessor) invalidSrcIA( /*@ ghost dp io.DataPlaneSpec @*/ ) (processResult, error) { return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidSourceAddress, &slayers.SCMPParameterProblem{Pointer: uint16(slayers.CmnHdrLen + addr.IABytes)}, invalidSrcIA, + /*@ dp, @*/ ) } // invalidDstIA is a helper to return an SCMP error for an invalid DstIA. // @ trusted +// @ requires dp.Valid() // @ requires false -func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { +func (p *scionPacketProcessor) invalidDstIA( /*@ ghost dp io.DataPlaneSpec @*/ ) (processResult, error) { return p.packSCMP( slayers.SCMPTypeParameterProblem, slayers.SCMPCodeInvalidDestinationAddress, &slayers.SCMPParameterProblem{Pointer: uint16(slayers.CmnHdrLen)}, invalidDstIA, + /*@ dp, @*/ ) } @@ -1988,7 +2172,7 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ requires acc(&p.path, R15) // @ requires acc(p.scionLayer.Mem(ub), R4) // @ requires p.path === p.scionLayer.GetPath(ub) -// @ requires acc(&p.ingressID, R20) +// @ requires acc(&p.ingressID, R21) // @ requires acc(&p.infoField, R4) && acc(&p.hopField, R4) // @ requires let ubPath := p.scionLayer.UBPath(ub) in // @ unfolding acc(p.scionLayer.Mem(ub), R10) in @@ -2001,7 +2185,7 @@ func (p *scionPacketProcessor) invalidDstIA() (processResult, error) { // @ preserves acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R4) // @ ensures acc(&p.path, R15) // @ ensures acc(p.scionLayer.Mem(ub), R4) -// @ ensures acc(&p.ingressID, R20) +// @ ensures acc(&p.ingressID, R21) // @ ensures acc(&p.infoField, R4) && acc(&p.hopField, R4) // @ ensures acc(&p.d, R20) // @ ensures acc(&p.srcAddr, R20) @@ -2045,18 +2229,37 @@ func (p *scionPacketProcessor) validateTransitUnderlaySrc( /*@ ghost ub []byte @ } // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.segmentChange, R20) -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) +// @ requires acc(&p.segmentChange, R20) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) +// @ ensures acc(&p.ingressID, R21) +// @ ensures acc(&p.segmentChange, R20) // @ ensures acc(&p.d, R20) // @ ensures p.d.validResult(respr, false) // @ ensures reserr == nil ==> respr === processResult{} // @ ensures reserr != nil ==> sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ requires p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ requires !p.segmentChange ==> AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ requires p.segmentChange ==> AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) +// @ ensures reserr == nil && !p.segmentChange ==> AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) +// @ ensures reserr == nil && p.segmentChange ==> oldPkt.RightSeg != none[io.IO_seg2] && len(get(oldPkt.RightSeg).Past) > 0 +// @ ensures reserr == nil && p.segmentChange ==> 
p.ingressID != 0 && AbsValidateEgressIDConstraintXover(oldPkt, dp) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr error) { - pktEgressID := p.egressInterface() +func (p *scionPacketProcessor) validateEgressID( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { + pktEgressID := p.egressInterface( /*@ oldPkt @*/ ) + // @ reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(pktEgressID)) // @ p.d.getInternalNextHops() // @ if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } _, ih := p.d.internalNextHops[pktEgressID] @@ -2074,25 +2277,34 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e errCode, &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, cannotRoute, + /*@ dp, @*/ ) } + // @ TemporaryAssumeForIO(pktEgressID != 0 && + // @ (io.IO_ifs(pktEgressID) in domain(dp.GetNeighborIAs()))) // @ p.d.getLinkTypesMem() ingress, egress := p.d.linkTypes[p.ingressID], p.d.linkTypes[pktEgressID] + // @ p.d.LinkTypesLemma(dp) if !p.segmentChange { // Check that the interface pair is valid within a single segment. // No check required if the packet is received from an internal interface. + // @ assert reveal AbsValidateIngressIDConstraint(oldPkt, path.ifsToIO_ifs(p.ingressID)) switch { case p.ingressID == 0: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Core && egress == topology.Core: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Parent: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Parent && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraint(oldPkt, (p.ingressID != 0), dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil default: // malicious @@ -2102,19 +2314,23 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e slayers.SCMPCodeInvalidPath, // XXX(matzf) new code InvalidHop? &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, serrors.WithCtx(cannotRoute, "ingress_id", p.ingressID, "ingress_type", ingress, - "egress_id", pktEgressID, "egress_type", egress)) + "egress_id", pktEgressID, "egress_type", egress) /*@, dp, @*/) } } + // @ assert reveal AbsValidateIngressIDConstraintXover(oldPkt, path.ifsToIO_ifs(p.ingressID)) // Check that the interface pair is valid on a segment switch. // Having a segment change received from the internal interface is never valid. 
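+	// The cases below admit exactly the following link-type pairs on a segment
+	// switch; every other combination, including ingress == 0, falls into the
+	// default branch and is rejected:
+	//   Core  -> Child
+	//   Child -> Core
+	//   Child -> Child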
switch { case ingress == topology.Core && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Core: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil case ingress == topology.Child && egress == topology.Child: + // @ assert reveal AbsValidateEgressIDConstraintXover(oldPkt, dp) // @ fold p.d.validResult(respr, false) return processResult{}, nil default: @@ -2124,22 +2340,39 @@ func (p *scionPacketProcessor) validateEgressID() (respr processResult, reserr e slayers.SCMPCodeInvalidSegmentChange, &slayers.SCMPParameterProblem{Pointer: p.currentInfoPointer( /*@ nil @*/ )}, serrors.WithCtx(cannotRoute, "ingress_id", p.ingressID, "ingress_type", ingress, - "egress_id", pktEgressID, "egress_type", egress)) + "egress_id", pktEgressID, "egress_type", egress) /*@, dp, @*/) } } -// @ preserves acc(&p.infoField) +// @ requires acc(&p.infoField) // @ requires acc(&p.path, R20) // @ requires acc(p.scionLayer.Mem(ub), R19) // @ requires p.path === p.scionLayer.GetPath(ub) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.hopField, R20) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ preserves acc(&p.ingressID, R21) +// @ ensures acc(&p.hopField, R20) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ ensures acc(&p.infoField) // @ ensures acc(&p.path, R20) // @ ensures acc(p.scionLayer.Mem(ub), R19) // @ ensures err != nil ==> err.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ requires acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) +// @ requires p.LastHopLen(ub, dp) +// @ requires p.EqAbsHopField(absPkt(dp, ub)) +// @ requires p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures acc(&p.d, R55) && acc(p.d.Mem(), _) && acc(&p.ingressID, R55) +// @ ensures err == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures err == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures err == nil ==> absPkt(dp, ub) == AbsUpdateNonConsDirIngressSegID(old(absPkt(dp, ub)), path.ifsToIO_ifs(p.ingressID)) +// @ ensures err == nil ==> p.LastHopLen(ub, dp) +// @ ensures err == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures err == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) // @ decreases -func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte @*/ ) (err error) { +func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (err error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost start := p.scionLayer.PathStartIdx(ub) // @ ghost end := p.scionLayer.PathEndIdx(ub) @@ -2151,16 +2384,31 @@ func (p *scionPacketProcessor) updateNonConsDirIngressSegID( /*@ ghost ub []byte // means this comes from this AS itself, so nothing has to be done. // TODO(lukedirtwalker): For packets destined to peer links this shouldn't // be updated. 
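+	// (VerifiedSCION) io.upd_uinfo is the abstract counterpart of the XOR performed
+	// by UpdateSegID below. Worked example with hypothetical values: for
+	// SegID == 0xBEEF and a MAC starting with bytes 0x01 0x02, the updated
+	// SegID is 0xBEEF ^ 0x0102 == 0xBFED.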
+ // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ reveal p.EqAbsHopField(absPkt(dp, ub)) if !p.infoField.ConsDir && p.ingressID != 0 { - p.infoField.UpdateSegID(p.hopField.Mac) + p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/) + // @ reveal p.LastHopLen(ub, dp) + // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // (VerifiedSCION) the following property is guaranteed by the type system, but Gobra cannot infer it yet // @ assume 0 <= p.path.GetCurrINF(ubPath) - // @ sl.SplitRange_Bytes(ub, start, end, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, start, end, writePerm) - if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath @*/); err != nil { + // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, start, end, dp) + // @ sl.SplitRange_Bytes(ub, start, end, HalfPerm) + if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF) /*@ ) , ubPath , dp@*/); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, start, end, writePerm) return serrors.WrapStr("update info field", err) } - } + // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ p.SubSliceAbsPktToAbsPkt(ub, start, end, dp) + // @ ghost sl.CombineRange_Bytes(ub, start, end, HalfPerm) + // @ absPktFutureLemma(dp, ub) + // @ assert absPkt(dp, ub).CurrSeg.UInfo == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) + // @ assert reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ assert reveal p.EqAbsHopField(absPkt(dp, ub)) + // @ assert reveal p.LastHopLen(ub, dp) + } + // @ assert absPkt(dp, ub) == reveal AbsUpdateNonConsDirIngressSegID(old(absPkt(dp, ub)), path.ifsToIO_ifs(p.ingressID)) return nil } @@ -2202,26 +2450,36 @@ func (p *scionPacketProcessor) currentHopPointer( /*@ ghost ubScionL []byte @*/ scion.MetaLen + path.InfoLen*p.path.NumINF + path.HopLen*int(p.path.PathMeta.CurrHF)) } +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) // @ preserves acc(&p.mac, R20) && p.mac != nil && p.mac.Mem() -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) // @ preserves acc(&p.macBuffers.scionInput, R20) // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) // @ preserves acc(&p.d, R50) && acc(p.d.Mem(), _) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures len(p.cachedMac) == path.MACBufferSize // @ ensures sl.AbsSlice_Bytes(p.cachedMac, 0, len(p.cachedMac)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(oldPkt) +// @ requires p.EqAbsInfoField(oldPkt) +// @ ensures reserr == nil ==> AbsVerifyCurrentMACConstraint(oldPkt, dp) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr error) { +func (p *scionPacketProcessor) 
verifyCurrentMAC( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { fullMac := path.FullMAC(p.mac, p.infoField, p.hopField, p.macBuffers.scionInput) - // @ fold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R20) - // @ defer unfold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R20) - // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R20) - // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R20) + // @ fold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ defer unfold acc(sl.AbsSlice_Bytes(p.hopField.Mac[:path.MacLen], 0, path.MacLen), R21) + // @ sl.SplitRange_Bytes(fullMac, 0, path.MacLen, R21) + // @ ghost defer sl.CombineRange_Bytes(fullMac, 0, path.MacLen, R21) if subtle.ConstantTimeCompare(p.hopField.Mac[:path.MacLen], fullMac[:path.MacLen]) == 0 { // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") return p.packSCMP( @@ -2234,12 +2492,19 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e "cons_dir", p.infoField.ConsDir, "if_id", p.ingressID, "curr_inf", p.path.PathMeta.CurrINF, "curr_hf", p.path.PathMeta.CurrHF, "seg_id", p.infoField.SegID), + /*@ dp, @*/ ) } // Add the full MAC to the SCION packet processor, // such that EPIC does not need to recalculate it. p.cachedMac = fullMac - + // @ reveal p.EqAbsInfoField(oldPkt) + // @ reveal p.EqAbsHopField(oldPkt) + // (VerifiedSCION) Assumptions for Cryptography: + // @ absInf := p.infoField.ToIntermediateAbsInfoField() + // @ absHF := p.hopField.ToIO_HF() + // @ AssumeForIO(dp.hf_valid(absInf.ConsDir, absInf.AInfo, absInf.UInfo, absHF)) + // @ reveal AbsVerifyCurrentMACConstraint(oldPkt, dp) // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } @@ -2260,8 +2525,12 @@ func (p *scionPacketProcessor) verifyCurrentMAC() (respr processResult, reserr e // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> !addrAliasesUb // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) { +func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (resaddr *net.UDPAddr, respr processResult, reserr error /*@ , addrAliasesUb bool @*/) { // (VerifiedSCION) the parameter used to be p.scionLayer, // instead of &p.scionLayer. 
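+	// Passing a pointer is presumably needed so that the predicate instance
+	// p.scionLayer.Mem(ubScionL) stays tied to a single stable location instead of
+	// a by-value copy of the layer.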
a, err /*@ , addrAliases @*/ := p.d.resolveLocalDst(&p.scionLayer /*@, ubScionL @*/) @@ -2275,7 +2544,7 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( r, err := p.packSCMP( slayers.SCMPTypeDestinationUnreachable, slayers.SCMPCodeNoRoute, - &slayers.SCMPDestinationUnreachable{}, err) + &slayers.SCMPDestinationUnreachable{}, err /*@, dp, @*/) return nil, r, err /*@ , false @*/ default: // @ fold p.d.validResult(respr, addrAliases) @@ -2286,30 +2555,47 @@ func (p *scionPacketProcessor) resolveInbound( /*@ ghost ubScionL []byte @*/ ) ( // @ requires acc(&p.path, R20) // @ requires p.scionLayer.Mem(ub) // @ requires p.path === p.scionLayer.GetPath(ub) -// @ preserves acc(&p.infoField) -// @ preserves acc(&p.hopField, R20) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.infoField) +// @ requires acc(&p.hopField, R20) +// @ ensures acc(&p.infoField) +// @ ensures acc(&p.hopField, R20) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(absPkt(dp, ub)) +// @ requires p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) >= 0 +// @ ensures reserr == nil ==> absPkt(dp, ub) == AbsProcessEgress(old(absPkt(dp, ub))) // @ decreases -func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr error) { +func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) // @ assert ub[startP:endP] === ubPath - // @ unfold p.scionLayer.Mem(ub) - // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ reveal p.EqAbsHopField(absPkt(dp, ub)) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ unfold acc(p.scionLayer.Mem(ub), R55) // we are the egress router and if we go in construction direction we // need to update the SegID. 
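+	// Abstractly, this function must realize AbsProcessEgress(old(absPkt(dp, ub))):
+	// in construction direction the SegID is rewritten first (the UpdateSegID call
+	// below), and IncPath then advances past the current hop field in the raw bytes.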
if p.infoField.ConsDir { - p.infoField.UpdateSegID(p.hopField.Mac) + p.infoField.UpdateSegID(p.hopField.Mac /*@, p.hopField.ToIO_HF() @*/) + // @ assert path.AbsUInfoFromUint16(p.infoField.SegID) == old(io.upd_uinfo(path.AbsUInfoFromUint16(p.infoField.SegID), p.hopField.ToIO_HF())) // @ assume 0 <= p.path.GetCurrINF(ubPath) - if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath @*/); err != nil { + if err := p.path.SetInfoField(p.infoField, int( /*@ unfolding acc(p.path.Mem(ubPath), R45) in (unfolding acc(p.path.Base.Mem(), R50) in @*/ p.path.PathMeta.CurrINF /*@ ) @*/) /*@ , ubPath, dp @*/); err != nil { // TODO parameter problem invalid path + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.path.DowngradePerm(ubPath) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) @@ -2318,58 +2604,99 @@ func (p *scionPacketProcessor) processEgress( /*@ ghost ub []byte @*/ ) (reserr } } if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) // @ fold p.scionLayer.NonInitMem() // TODO parameter problem invalid path return serrors.WrapStr("incrementing path", err) } - // @ fold p.scionLayer.Mem(ub) + // @ fold acc(p.scionLayer.Mem(ub), R55) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ absPktFutureLemma(dp, ub) + // @ TemporaryAssumeForIO(absPkt(dp, ub) == AbsProcessEgress(old(absPkt(dp, ub)))) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return nil } // @ requires acc(&p.path, R20) // @ requires p.scionLayer.Mem(ub) // @ requires p.path == p.scionLayer.GetPath(ub) -// @ preserves acc(&p.segmentChange) && acc(&p.hopField) && acc(&p.infoField) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ preserves acc(&p.segmentChange) +// @ preserves acc(&p.hopField) +// @ preserves acc(&p.infoField) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) -// @ ensures reserr == nil ==> (p.scionLayer.Mem(ub) && p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) && p.scionLayer.GetPath(ub) === old(p.scionLayer.GetPath(ub))) +// @ ensures reserr == nil ==> p.scionLayer.Mem(ub) +// @ ensures reserr == nil ==> p.scionLayer.UBPath(ub) === old(p.scionLayer.UBPath(ub)) +// @ ensures reserr == nil ==> p.scionLayer.GetPath(ub) === old(p.scionLayer.GetPath(ub)) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures p.segmentChange // @ ensures respr === processResult{} // @ ensures reserr != nil ==> reserr.ErrorMem() +// contract for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires p.GetIsXoverSpec(ub) +// @ ensures reserr == nil ==> len(old(absPkt(dp, ub)).CurrSeg.Future) == 1 +// @ ensures reserr == nil ==> old(absPkt(dp, ub)).LeftSeg != none[io.IO_seg2] +// @ ensures reserr == nil ==> len(get(old(absPkt(dp, ub)).LeftSeg).Future) > 0 +// @ ensures reserr == nil ==> 
len(get(old(absPkt(dp, ub)).LeftSeg).History) == 0 +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> absPkt(dp, ub) == AbsDoXover(old(absPkt(dp, ub))) // @ decreases -func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) doXover( /*@ ghost ub []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { p.segmentChange = true - // @ unfold p.scionLayer.Mem(ub) - // @ ghost startP := int(slayers.CmnHdrLen + p.scionLayer.AddrHdrLen(nil, true)) - // @ ghost endP := int(p.scionLayer.HdrLen * slayers.LineLen) + // @ ghost startP := p.scionLayer.PathStartIdx(ub) + // @ ghost endP := p.scionLayer.PathEndIdx(ub) // @ ghost ubPath := ub[startP:endP] - // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ub, startP, endP, writePerm) + + // @ unfold acc(p.scionLayer.Mem(ub), 1-R55) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ TemporaryAssumeForIO(len(old(absPkt(dp, ub)).CurrSeg.Future) == 1) + // @ reveal p.EqAbsInfoField(absPkt(dp, ub)) + // @ reveal p.EqAbsHopField(absPkt(dp, ub)) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ unfold acc(p.scionLayer.Mem(ub), R55) if err := p.path.IncPath( /*@ ubPath @*/ ); err != nil { // TODO parameter problem invalid path // TODO(joao): we currently expose a lot of internal information from slayers here. Can we avoid it? + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ unfold p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]) // @ p.scionLayer.PathPoolMemExchange(p.scionLayer.PathType, p.scionLayer.Path) // @ fold p.scionLayer.NonInitMem() return processResult{}, serrors.WrapStr("incrementing path", err) } + // @ fold acc(p.scionLayer.Mem(ub), R55) var err error if p.hopField, err = p.path.GetCurrentHopField( /*@ ubPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.scionLayer.Mem(ub) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } if p.infoField, err = p.path.GetCurrentInfoField( /*@ ubPath @*/ ); err != nil { + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.scionLayer.Mem(ub) // @ p.scionLayer.DowngradePerm(ub) // TODO parameter problem invalid path return processResult{}, err } - // @ fold p.scionLayer.Mem(ub) + // @ ghost sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ TemporaryAssumeForIO(old(absPkt(dp, ub)).LeftSeg != none[io.IO_seg2]) + // @ TemporaryAssumeForIO(len(get(old(absPkt(dp, ub)).LeftSeg).Future) > 0) + // @ TemporaryAssumeForIO(len(get(old(absPkt(dp, ub)).LeftSeg).History) == 0) + // @ TemporaryAssumeForIO(slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub)) + // @ TemporaryAssumeForIO(absPkt(dp, ub) == AbsDoXover(old(absPkt(dp, ub)))) + // @ fold acc(p.scionLayer.Mem(ub), 1-R55) return processResult{}, nil } @@ -2403,28 +2730,50 @@ func (p *scionPacketProcessor) ingressInterface( /*@ ghost ubPath []byte @*/ ) u return hop.ConsEgress } -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) +// @ requires acc(&p.infoField, R21) +// @ requires acc(&p.hopField, R21) +// @ 
ensures acc(&p.infoField, R21) +// @ ensures acc(&p.hopField, R21) +// contracts for IO-spec +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsInfoField(oldPkt) +// @ requires p.EqAbsHopField(oldPkt) +// @ ensures p.EqAbsInfoField(oldPkt) +// @ ensures p.EqAbsHopField(oldPkt) +// @ ensures AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(egress)) // @ decreases -func (p *scionPacketProcessor) egressInterface() uint16 { +func (p *scionPacketProcessor) egressInterface( /*@ ghost oldPkt io.IO_pkt2 @*/ ) /*@ (egress @*/ uint16 /*@ ) @*/ { + // @ reveal p.EqAbsInfoField(oldPkt) + // @ reveal p.EqAbsHopField(oldPkt) if p.infoField.ConsDir { + // @ assert reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(p.hopField.ConsEgress)) return p.hopField.ConsEgress } + // @ assert reveal AbsEgressInterfaceConstraint(oldPkt, path.ifsToIO_ifs(p.hopField.ConsIngress)) return p.hopField.ConsIngress } // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField, R20) -// @ preserves acc(&p.ingressID, R20) +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField, R20) +// @ preserves acc(&p.ingressID, R21) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField, R20) // @ ensures acc(&p.d, R20) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ requires len(oldPkt.CurrSeg.Future) > 0 +// @ requires p.EqAbsInfoField(oldPkt) +// @ requires p.EqAbsHopField(oldPkt) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr error) { - egressID := p.egressInterface() +func (p *scionPacketProcessor) validateEgressUp( /*@ ghost oldPkt io.IO_pkt2, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { + egressID := p.egressInterface( /*@ oldPkt @*/ ) // @ p.d.getBfdSessionsMem() // @ ghost if p.d.bfdSessions != nil { unfold acc(accBfdSession(p.d.bfdSessions), _) } if v, ok := p.d.bfdSessions[egressID]; ok { @@ -2446,7 +2795,7 @@ func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr e } } // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down")) + return p.packSCMP(typ, 0, scmpP, serrors.New("bfd session down") /*@, dp @*/) } } // @ fold p.d.validResult(processResult{}, false) @@ -2458,25 +2807,42 @@ func (p *scionPacketProcessor) validateEgressUp() (respr processResult, reserr e // @ requires acc(p.scionLayer.Mem(ub), R10) // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.ingressID, R21) +// @ requires acc(&p.hopField) // @ preserves acc(&p.lastLayer, R19) // @ preserves p.lastLayer != nil // @ preserves (&p.scionLayer !== p.lastLayer && llIsNil) ==> // @ acc(p.lastLayer.Mem(nil), R15) // @ preserves (&p.scionLayer !== p.lastLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R15) -// @ preserves acc(&p.ingressID, R20) // @ preserves acc(&p.infoField, R20) -// @ 
preserves acc(&p.hopField) +// @ ensures acc(&p.hopField) +// @ ensures acc(&p.ingressID, R21) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) // @ ensures acc(p.scionLayer.Mem(ub), R10) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R20) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires p.DstIsLocalIngressID(ub) +// @ requires p.LastHopLen(ub, dp) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> p.DstIsLocalIngressID(ub) +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> p.LastHopLen(ub, dp) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> absPkt(dp, ub) == old(absPkt(dp, ub)) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) @@ -2495,13 +2861,27 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh // @ defer fold acc(p.scionLayer.Mem(ub), R20) // (VerifiedSCION) the following is guaranteed by the type system, but Gobra cannot prove it yet // @ assume 0 <= p.path.GetCurrHF(ubPath) - // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) + // @ reveal p.LastHopLen(ub, dp) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("update hop field", err) } - // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ assert p.DstIsLocalIngressID(ub) + // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) + // @ absPktFutureLemma(dp, ub) + // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopfield + // @ TemporaryAssumeForIO(absPkt(dp, ub) == old(absPkt(dp, ub))) + // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ assert dp.Valid() + // @ assert slayers.ValidPktMetaHdr(ub) + // @ assert reveal p.LastHopLen(ub, dp) + // @ assert p.scionLayer.EqAbsHeader(ub) /*@ ghost var 
ubLL []byte ghost if &p.scionLayer === p.lastLayer { @@ -2511,11 +2891,11 @@ func (p *scionPacketProcessor) handleIngressRouterAlert( /*@ ghost ub []byte, gh sl.NilAcc_Bytes() } else { ubLL = ub[startLL:endLL] - sl.SplitRange_Bytes(ub, startLL, endLL, writePerm) - ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, writePerm) + sl.SplitRange_Bytes(ub, startLL, endLL, R1) + ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) } @*/ - return p.handleSCMPTraceRouteRequest(p.ingressID /*@ , ubLL @*/) + return p.handleSCMPTraceRouteRequest(p.ingressID /*@ , ubLL, dp @*/) } // @ preserves acc(&p.infoField, R20) @@ -2530,28 +2910,43 @@ func (p *scionPacketProcessor) ingressRouterAlertFlag() (res *bool) { // @ requires 0 <= startLL && startLL <= endLL && endLL <= len(ub) // @ requires acc(&p.path, R20) -// @ requires acc(p.scionLayer.Mem(ub), R14) +// @ requires acc(p.scionLayer.Mem(ub), R13) // @ requires p.path === p.scionLayer.GetPath(ub) // @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.infoField, R20) +// @ requires acc(&p.hopField) // @ preserves acc(&p.lastLayer, R19) // @ preserves p.lastLayer != nil // @ preserves (&p.scionLayer !== p.lastLayer && llIsNil) ==> // @ acc(p.lastLayer.Mem(nil), R15) // @ preserves (&p.scionLayer !== p.lastLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R15) -// @ preserves acc(&p.ingressID, R20) -// @ preserves acc(&p.infoField, R20) -// @ preserves acc(&p.hopField) +// @ preserves acc(&p.ingressID, R21) +// @ ensures acc(&p.infoField, R20) +// @ ensures acc(&p.hopField) +// @ ensures sl.AbsSlice_Bytes(ub, 0, len(ub)) // @ ensures acc(&p.path, R20) -// @ ensures acc(p.scionLayer.Mem(ub), R14) +// @ ensures acc(p.scionLayer.Mem(ub), R13) // @ ensures acc(&p.d, R20) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() && slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ requires len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ requires p.EqAbsHopField(absPkt(dp, ub)) +// @ requires p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) +// @ ensures reserr == nil ==> len(absPkt(dp, ub).CurrSeg.Future) > 0 +// @ ensures reserr == nil ==> p.EqAbsHopField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> p.EqAbsInfoField(absPkt(dp, ub)) +// @ ensures reserr == nil ==> absPkt(dp, ub) == old(absPkt(dp, ub)) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { // @ ghost ubPath := p.scionLayer.UBPath(ub) // @ ghost startP := p.scionLayer.PathStartIdx(ub) // @ ghost endP := p.scionLayer.PathEndIdx(ub) @@ -2562,7 +2957,7 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // @ fold p.d.validResult(processResult{}, false) return processResult{}, nil } - 
egressID := p.egressInterface() + egressID := p.egressInterface( /*@ absPkt(dp, ub) @*/ ) // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if _, ok := p.d.external[egressID]; !ok { @@ -2575,13 +2970,22 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho // (VerifiedSCION) the following is guaranteed by the type system, // but Gobra cannot prove it yet // @ assume 0 <= p.path.GetCurrHF(ubPath) - // @ sl.SplitRange_Bytes(ub, startP, endP, writePerm) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) + // @ p.AbsPktToSubSliceAbsPkt(ub, startP, endP, dp) + // @ sl.SplitRange_Bytes(ub, startP, endP, HalfPerm) if err := p.path.SetHopField(p.hopField, int( /*@ unfolding acc(p.path.Mem(ubPath), R50) in (unfolding acc(p.path.Base.Mem(), R55) in @*/ p.path.PathMeta.CurrHF /*@ ) @*/) /*@ , ubPath @*/); err != nil { // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("update hop field", err) } - // @ sl.CombineRange_Bytes(ub, startP, endP, writePerm) + // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ TemporaryAssumeForIO(dp.Valid() && scion.validPktMetaHdr(ubPath) && p.path.EqAbsHeader(ubPath)) // postcondition of SetHopfield + // @ p.SubSliceAbsPktToAbsPkt(ub, startP, endP, dp) + // @ absPktFutureLemma(dp, ub) + // @ TemporaryAssumeForIO(p.EqAbsHopField(absPkt(dp, ub))) // postcondition of SetHopfield + // @ TemporaryAssumeForIO(p.EqAbsInfoField(absPkt(dp, ub))) + // @ sl.CombineRange_Bytes(ub, startP, endP, HalfPerm) + // @ TemporaryAssumeForIO(absPkt(dp, ub) == old(absPkt(dp, ub))) /*@ ghost var ubLL []byte ghost if &p.scionLayer === p.lastLayer { @@ -2591,14 +2995,14 @@ func (p *scionPacketProcessor) handleEgressRouterAlert( /*@ ghost ub []byte, gho sl.NilAcc_Bytes() } else { ubLL = ub[startLL:endLL] - sl.SplitRange_Bytes(ub, startLL, endLL, writePerm) - ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, writePerm) + sl.SplitRange_Bytes(ub, startLL, endLL, R1) + ghost defer sl.CombineRange_Bytes(ub, startLL, endLL, R1) } @*/ - return p.handleSCMPTraceRouteRequest(egressID /*@ , ubLL @*/) + return p.handleSCMPTraceRouteRequest(egressID /*@ , ubLL, dp @*/) } -// @ preserves acc(&p.infoField, R20) +// @ preserves acc(&p.infoField, R21) // @ ensures res == &p.hopField.IngressRouterAlert || res == &p.hopField.EgressRouterAlert // @ decreases func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) { @@ -2610,18 +3014,22 @@ func (p *scionPacketProcessor) egressRouterAlertFlag() (res *bool) { // @ requires acc(&p.lastLayer, R20) // @ requires p.lastLayer != nil && acc(p.lastLayer.Mem(ubLastLayer), R15) -// @ requires acc(&p.d, R20) && acc(p.d.Mem(), _) -// @ preserves sl.AbsSlice_Bytes(ubLastLayer, 0, len(ubLastLayer)) +// @ requires acc(&p.d, R21) && acc(p.d.Mem(), _) +// @ preserves acc(sl.AbsSlice_Bytes(ubLastLayer, 0, len(ubLastLayer)), R1) // @ ensures acc(&p.lastLayer, R20) // @ ensures acc(p.lastLayer.Mem(ubLastLayer), R15) -// @ ensures acc(&p.d, R20) +// @ ensures acc(&p.d, R21) && acc(p.d.Mem(), _) // @ ensures p.d.validResult(respr, false) // @ ensures respr.OutPkt != nil ==> // @ reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases 
func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( - interfaceID uint16 /*@ , ghost ubLastLayer []byte @*/) (respr processResult, reserr error) { + interfaceID uint16 /*@ , ghost ubLastLayer []byte, ghost dp io.DataPlaneSpec @*/) (respr processResult, reserr error) { if p.lastLayer.NextLayerType( /*@ ubLastLayer @*/ ) != slayers.LayerTypeSCMP { log.Debug("Packet with router alert, but not SCMP") @@ -2631,8 +3039,8 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( scionPld /*@ , start, end @*/ := p.lastLayer.LayerPayload( /*@ ubLastLayer @*/ ) // @ assert scionPld === ubLastLayer[start:end] || scionPld == nil // @ if scionPld == nil { sl.NilAcc_Bytes() } else { - // @ sl.SplitRange_Bytes(ubLastLayer, start, end, writePerm) - // @ ghost defer sl.CombineRange_Bytes(ubLastLayer, start, end, writePerm) + // @ sl.SplitRange_Bytes(ubLastLayer, start, end, R1) + // @ ghost defer sl.CombineRange_Bytes(ubLastLayer, start, end, R1) // @ } // @ gopacket.AssertInvariantNilDecodeFeedback() var scmpH /*@@@*/ slayers.SCMP @@ -2652,8 +3060,8 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ fold scmpP.NonInitMem() // @ unfold scmpH.Mem(scionPld) // @ unfold scmpH.BaseLayer.Mem(scionPld, 4) - // @ sl.SplitRange_Bytes(scionPld, 4, len(scionPld), writePerm) - // @ ghost defer sl.CombineRange_Bytes(scionPld, 4, len(scionPld), writePerm) + // @ sl.SplitRange_Bytes(scionPld, 4, len(scionPld), R1) + // @ ghost defer sl.CombineRange_Bytes(scionPld, 4, len(scionPld), R1) if err := scmpP.DecodeFromBytes(scmpH.Payload, gopacket.NilDecodeFeedback); err != nil { log.Debug("Parsing SCMPTraceroute", "err", err) // @ fold p.d.validResult(processResult{}, false) @@ -2669,7 +3077,7 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( Interface: uint64(interfaceID), } // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4482") // depends on packSCMP - return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil) + return p.packSCMP(slayers.SCMPTypeTracerouteReply, 0, &scmpP, nil /*@, dp @*/) } // @ preserves acc(p.scionLayer.Mem(ubScionL), R20) @@ -2678,8 +3086,12 @@ func (p *scionPacketProcessor) handleSCMPTraceRouteRequest( // @ ensures respr.OutPkt != nil ==> reserr != nil && sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr == nil ==> int(p.scionLayer.GetPayloadLen(ubScionL)) == len(p.scionLayer.GetPayload(ubScionL)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires dp.Valid() +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ absIO_val(dp, respr.OutPkt, respr.EgressID).isIO_val_Unsupported // @ decreases -func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) (respr processResult, reserr error) { +func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error) { // @ unfold acc(p.scionLayer.Mem(ubScionL), R20) // @ defer fold acc(p.scionLayer.Mem(ubScionL), R20) if int(p.scionLayer.PayloadLen) == len(p.scionLayer.Payload) { @@ -2693,6 +3105,7 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( &slayers.SCMPParameterProblem{Pointer: 0}, serrors.New("bad packet size", "header", p.scionLayer.PayloadLen, "actual", len(p.scionLayer.Payload)), + /*@ dp, @*/ ) } @@ -2706,6 +3119,8 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ requires p.scionLayer.Mem(ub) // @ requires p.path == p.scionLayer.GetPath(ub) // @ 
requires sl.AbsSlice_Bytes(ub, 0, len(ub)) +// @ requires acc(&p.ingressID, R20) +// @ requires acc(&p.segmentChange) && !p.segmentChange // @ preserves acc(&p.srcAddr, R10) && acc(p.srcAddr.Mem(), _) // @ preserves acc(&p.lastLayer, R10) // @ preserves p.lastLayer != nil @@ -2713,14 +3128,14 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ acc(p.lastLayer.Mem(nil), R10) // @ preserves (p.lastLayer !== &p.scionLayer && !llIsNil) ==> // @ acc(p.lastLayer.Mem(ub[startLL:endLL]), R10) -// @ preserves acc(&p.ingressID, R20) // @ preserves acc(&p.infoField) // @ preserves acc(&p.hopField) -// @ preserves acc(&p.segmentChange) // @ preserves acc(&p.mac, R10) && p.mac != nil && p.mac.Mem() // @ preserves acc(&p.macBuffers.scionInput, R10) // @ preserves sl.AbsSlice_Bytes(p.macBuffers.scionInput, 0, len(p.macBuffers.scionInput)) // @ preserves acc(&p.cachedMac) +// @ ensures acc(&p.segmentChange) +// @ ensures acc(&p.ingressID, R20) // @ ensures acc(&p.d, R5) // @ ensures acc(&p.path, R10) // @ ensures acc(&p.rawPkt, R1) @@ -2735,118 +3150,192 @@ func (p *scionPacketProcessor) validatePktLen( /*@ ghost ubScionL []byte @*/ ) ( // @ ensures reserr == nil ==> p.scionLayer.Mem(ub) // @ ensures reserr != nil ==> p.scionLayer.NonInitMem() // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires p.d.DpAgreesWithSpec(dp) +// @ requires dp.Valid() +// @ requires slayers.ValidPktMetaHdr(ub) && p.scionLayer.EqAbsHeader(ub) && p.scionLayer.EqPathType(ub) +// @ requires acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +// @ requires let absPkt := absIO_val(dp, ub, p.ingressID) in +// @ absPkt.isIO_val_Pkt2 ==> ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(p.ingressID), absPkt.IO_val_Pkt2_2) +// @ ensures reserr == nil && newAbsPkt.isIO_val_Pkt2 ==> +// @ ElemWitness(ioSharedArg.OBufY, newAbsPkt.IO_val_Pkt2_1, newAbsPkt.IO_val_Pkt2_2) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) +// @ ensures reserr != nil && respr.OutPkt != nil ==> +// @ newAbsPkt.isIO_val_Unsupported +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool @*/) { - if r, err := p.parsePath( /*@ ub @*/ ); err != nil { +// @ #backend[stateConsolidationMode(6)] +func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, ghost startLL int, ghost endLL int, ghost ioLock *sync.Mutex, ghost ioSharedArg SharedArg, ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@, addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { + // @ ghost var oldPkt io.IO_pkt2 + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ absIO_valLemma(dp, ub, p.ingressID) + // @ oldPkt = absIO_val(dp, ub, p.ingressID).IO_val_Pkt2_2 + // @ } else { + // @ absPktFutureLemma(dp, ub) + // @ oldPkt = absPkt(dp, ub) + // @ } + // @ nextPkt := oldPkt + if r, err := p.parsePath( /*@ ub , dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateHopExpiry(); err != nil { + if r, err := p.validateHopExpiry( /*@ dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, 
absReturnErr(dp, r) @*/ } - if r, err := p.validateIngressID(); err != nil { + if r, err := p.validateIngressID( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validatePktLen( /*@ ub @*/ ); err != nil { + // @ assert AbsValidateIngressIDConstraint(nextPkt, path.ifsToIO_ifs(p.ingressID)) + if r, err := p.validatePktLen( /*@ ub, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } if r, err := p.validateTransitUnderlaySrc( /*@ ub @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateSrcDstIA( /*@ ub @*/ ); err != nil { + if r, err := p.validateSrcDstIA( /*@ ub, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if err := p.updateNonConsDirIngressSegID( /*@ ub @*/ ); err != nil { + if err := p.updateNonConsDirIngressSegID( /*@ ub, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ } - if r, err := p.verifyCurrentMAC(); err != nil { + // @ assert absPkt(dp, ub) == AbsUpdateNonConsDirIngressSegID(oldPkt, path.ifsToIO_ifs(p.ingressID)) + // @ nextPkt = absPkt(dp, ub) + // @ AbsValidateIngressIDLemma(oldPkt, nextPkt, path.ifsToIO_ifs(p.ingressID)) + if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.handleIngressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { + // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) + if r, err := p.handleIngressRouterAlert( /*@ ub, llIsNil, startLL, endLL, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - + // @ assert nextPkt == absPkt(dp, ub) // Inbound: pkts destined to the local IA. 
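From this point on, process() commits to one of four outcomes: inbound delivery, BR-transit, AS-transit, or an SCMP error. A minimal Go sketch of that dispatch, with hypothetical simplified types (the real code below consults p.d.external and p.d.internalNextHops and builds a processResult rather than a string):

func forwardClass(dstIsLocal bool, egressID uint16, external, internalNextHops map[uint16]bool) string {
	switch {
	case dstIsLocal:
		return "inbound: deliver on the internal connection"
	case external[egressID]:
		return "br-transit: send on this router's own external interface"
	case internalNextHops[egressID]:
		return "as-transit: forward to the sibling router that owns egressID"
	default:
		return "error: SCMP unknown hop field egress"
	}
}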
// @ p.d.getLocalIA() if /*@ unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in @*/ p.scionLayer.DstIA /*@ ) @*/ == p.d.localIA { - a, r, err /*@, aliasesUb @*/ := p.resolveInbound( /*@ ub @*/ ) + // @ assert p.DstIsLocalIngressID(ub) + // @ assert unfolding acc(p.scionLayer.Mem(ub), R50) in (unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in p.scionLayer.DstIA) == p.d.localIA + // @ p.LocalDstLemma(ub, dp) + // @ assert p.ingressID != 0 + // @ assert len(nextPkt.CurrSeg.Future) == 1 + a, r, err /*@, aliasesUb @*/ := p.resolveInbound( /*@ ub, dp @*/ ) if err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, aliasesUb @*/ + return r, err /*@, aliasesUb, absReturnErr(dp, r) @*/ } // @ p.d.getInternal() // @ unfold p.d.validResult(r, aliasesUb) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, aliasesUb) // @ assert ub === p.rawPkt - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, aliasesUb @*/ + // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } + // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0) + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, aliasesUb, newAbsPkt @*/ } - // Outbound: pkts leaving the local IA. // BRTransit: pkts leaving from the same BR different interface. - // @ unfold acc(p.scionLayer.Mem(ub), R3) // @ ghost ubPath := p.scionLayer.UBPath(ub) if p.path.IsXover( /*@ ubPath @*/ ) { + // @ assert p.GetIsXoverSpec(ub) // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := p.doXover( /*@ ub @*/ ); err != nil { - // @ fold p.d.validResult(r, false) - return r, err /*@, false @*/ + if r, err := p.doXover( /*@ ub, dp @*/ ); err != nil { + // @ fold p.d.validResult(processResult{}, false) + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateHopExpiry(); err != nil { + // @ assert absPkt(dp, ub) == AbsDoXover(nextPkt) + // @ AbsValidateIngressIDXoverLemma(nextPkt, AbsDoXover(nextPkt), path.ifsToIO_ifs(p.ingressID)) + // @ nextPkt = absPkt(dp, ub) + if r, err := p.validateHopExpiry( /*@ dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(dp, r) @*/ } // verify the new block - if r, err := p.verifyCurrentMAC(); err != nil { - // fold acc(p.scionLayer.Mem(ub), R3) + if r, err := p.verifyCurrentMAC( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, serrors.WithCtx(err, "info", "after xover") /*@, false @*/ + return r, serrors.WithCtx(err, "info", "after xover") /*@, false, absReturnErr(dp, r) @*/ } + // @ assert AbsVerifyCurrentMACConstraint(nextPkt, dp) } // @ fold acc(p.scionLayer.Mem(ub), R3) - if r, err := p.validateEgressID(); err != nil { + // @ assert p.segmentChange ==> nextPkt.RightSeg != none[io.IO_seg2] + if r, err := p.validateEgressID( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } + // @ assert !p.segmentChange ==> AbsValidateEgressIDConstraint(nextPkt, (p.ingressID != 0), dp) + // @ assert p.segmentChange ==> p.ingressID != 0 && 
AbsValidateEgressIDConstraintXover(nextPkt, dp) // handle egress router alert before we check if it's up because we want to // send the reply anyway, so that trace route can pinpoint the exact link // that failed. - if r, err := p.handleEgressRouterAlert( /*@ ub, llIsNil, startLL, endLL @*/ ); err != nil { + if r, err := p.handleEgressRouterAlert( /*@ ub, llIsNil, startLL, endLL , dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - if r, err := p.validateEgressUp(); err != nil { + // @ assert nextPkt == absPkt(dp, ub) + if r, err := p.validateEgressUp( /*@ nextPkt, dp @*/ ); err != nil { // @ p.scionLayer.DowngradePerm(ub) - return r, err /*@, false @*/ + return r, err /*@, false, absReturnErr(dp, r) @*/ } - egressID := p.egressInterface() + // @ assert nextPkt == absPkt(dp, ub) + egressID := p.egressInterface( /*@ nextPkt @*/ ) + // @ assert AbsEgressInterfaceConstraint(nextPkt, path.ifsToIO_ifs(egressID)) // @ p.d.getExternalMem() // @ if p.d.external != nil { unfold acc(accBatchConn(p.d.external), _) } if c, ok := p.d.external[egressID]; ok { - if err := p.processEgress( /*@ ub @*/ ); err != nil { + if err := p.processEgress( /*@ ub, dp @*/ ); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@, false @*/ + return processResult{}, err /*@, false, absReturnErr(dp, processResult{}) @*/ } // @ p.d.InDomainExternalInForwardingMetrics2(egressID) + // @ assert absPkt(dp, ub) == AbsProcessEgress(nextPkt) + // @ nextPkt = absPkt(dp, ub) + // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ ghost if(!p.segmentChange) { + // enter/exit event + // @ ExternalEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ } else { + // xover event + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, path.ifsToIO_ifs(egressID), ioLock, ioSharedArg, dp) + // @ } + // @ } + // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, egressID) // @ fold p.d.validResult(processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, false) - return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false @*/ + return processResult{EgressID: egressID, OutConn: c, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } - // ASTransit: pkts leaving from another AS BR. 
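Which ghost event justifies a forwarded packet depends only on whether the packet switched segments and whether it leaves the AS at this router. A sketch of that decision, using the event names defined in router/io-spec-abstract-transitions.gobra further down (simplified; the exit case for locally originated packets is folded into ExternalEvent):

func ioEvent(segmentChange, leavesAS bool) string {
	switch {
	case segmentChange:
		return "XoverEvent" // up-to-down segment switch; egress may be none
	case leavesAS:
		return "ExternalEvent" // enter, or exit when ingress is local
	default:
		return "InternalEnterEvent" // stays inside the AS, egress is none
	}
}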
// @ p.d.getInternalNextHops() // @ ghost if p.d.internalNextHops != nil { unfold acc(accAddr(p.d.internalNextHops), _) } if a, ok := p.d.internalNextHops[egressID]; ok { // @ p.d.getInternal() + // @ ghost if(path.ifsToIO_ifs(p.ingressID) != none[io.IO_ifs]) { + // @ TemporaryAssumeForIO(old(slayers.IsSupportedPkt(ub)) == slayers.IsSupportedPkt(ub)) + // @ ghost if(slayers.IsSupportedPkt(ub)) { + // @ if(!p.segmentChange) { + // enter event + // @ InternalEnterEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } else { + // xover event + // @ XoverEvent(oldPkt, path.ifsToIO_ifs(p.ingressID), nextPkt, none[io.IO_ifs], ioLock, ioSharedArg, dp) + // @ } + // @ } + // @ newAbsPkt = reveal absIO_val(dp, p.rawPkt, 0) + // @ } else { + // @ ToDoAfterScionFix("https://github.com/scionproto/scion/issues/4497") + // @ } // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, false) - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false @*/ + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@, false, newAbsPkt @*/ } errCode := slayers.SCMPCodeUnknownHopFieldEgress if !p.infoField.ConsDir { @@ -2859,8 +3348,9 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, errCode, &slayers.SCMPParameterProblem{Pointer: p.currentHopPointer( /*@ nil @*/ )}, cannotRoute, + /*@ dp, @*/ ) - return tmp, err /*@, false @*/ + return tmp, err /*@, false, absReturnErr(dp, tmp) @*/ } // @ requires acc(&p.rawPkt, R15) @@ -2888,8 +3378,16 @@ func (p *scionPacketProcessor) process( /*@ ghost ub []byte, ghost llIsNil bool, // @ ensures respr.OutPkt !== p.rawPkt && respr.OutPkt != nil ==> // @ sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)) // @ ensures reserr != nil ==> reserr.ErrorMem() +// contracts for IO-spec +// @ requires p.scionLayer.EqPathType(p.rawPkt) +// @ requires !slayers.IsSupportedPkt(p.rawPkt) +// @ requires dp.Valid() +// @ ensures (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) +// @ ensures respr.OutPkt != nil ==> +// @ newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID) && +// @ newAbsPkt.isIO_val_Unsupported // @ decreases 0 if sync.IgnoreBlockingForTermination() -func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error /*@ , addrAliasesPkt bool @*/) { +func (p *scionPacketProcessor) processOHP( /*@ ghost dp io.DataPlaneSpec @*/ ) (respr processResult, reserr error /*@ , addrAliasesPkt bool, ghost newAbsPkt io.IO_val @*/) { // @ ghost ubScionL := p.rawPkt // @ p.scionLayer.ExtractAcc(ubScionL) s := p.scionLayer @@ -2905,7 +3403,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ establishMemMalformedPath() // @ fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) - return processResult{}, malformedPath /*@ , false @*/ + return processResult{}, malformedPath /*@ , false, absReturnErr(dp, processResult{}) @*/ } if /*@ unfolding acc(s.Path.Mem(ubPath), R50) in @*/ !ohp.Info.ConsDir { // TODO parameter problem -> invalid path // @ establishMemMalformedPath() // @ fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr( "OneHop path in reverse construction direction is not allowed", - malformedPath, "srcIA", s.SrcIA, "dstIA", s.DstIA) /*@ , false @*/ + malformedPath, "srcIA", s.SrcIA, "dstIA", s.DstIA) /*@ , false, 
absReturnErr(dp, processResult{}) @*/ } // OHP leaving our IA @@ -2927,7 +3425,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), - "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false @*/ + "localIA", p.d.localIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA, ok := p.d.neighborIAs[ /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/] @@ -2937,7 +3435,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ defer fold p.scionLayer.Mem(ubScionL) // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, - "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false @*/ + "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/)) /*@ , false, absReturnErr(dp, processResult{}) @*/ } if !neighborIA.Equal(s.DstIA) { // @ establishCannotRoute() @@ -2945,7 +3443,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "egress", ( /*@ unfolding acc(ohp.Mem(ubPath), R50) in (unfolding acc(ohp.FirstHop.Mem(), R55) in @*/ ohp.FirstHop.ConsEgress /*@ ) @*/), - "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false @*/ + "neighborIA", neighborIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ unfold s.Path.Mem(ubPath) // @ unfold ohp.FirstHop.Mem() @@ -2970,9 +3468,9 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // TODO parameter problem -> invalid MAC // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.New("MAC", "expected", fmt.Sprintf("%x", macCopy), - "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false @*/ + "actual", fmt.Sprintf("%x", ohp.FirstHop.Mac), "type", "ohp") /*@ , false, absReturnErr(dp, processResult{}) @*/ } - ohp.Info.UpdateSegID(ohp.FirstHop.Mac) + ohp.Info.UpdateSegID(ohp.FirstHop.Mac /*@, ohp.FirstHop.ToIO_HF() @*/) // @ fold ohp.FirstHop.Mem() // @ fold s.Path.Mem(ubPath) // @ fold p.scionLayer.Mem(ubScionL) @@ -2981,7 +3479,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // changes made to 'updateSCIONLayer'. 
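updateSCIONLayer is needed here because the mutations above (the SegID update and the hop-field edits) only touch the decoded struct; the raw bytes that are ultimately handed to the connection are stale until the header is serialized back. A sketch of the idea using gopacket's standard serialization API (flushHeader is a hypothetical stand-in, not the actual updateSCIONLayer implementation):

func flushHeader(rawPkt []byte, scionLayer *slayers.SCION) error {
	buf := gopacket.NewSerializeBuffer()
	if err := scionLayer.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true}); err != nil {
		return err
	}
	copy(rawPkt, buf.Bytes()) // refresh the outgoing bytes in place
	return nil
}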
if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ unfold p.scionLayer.Mem(ubScionL) // @ defer fold p.scionLayer.Mem(ubScionL) @@ -3002,14 +3500,15 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // domain of forwardingMetrics is the same as the one for external // @ p.d.InDomainExternalInForwardingMetrics(ohp.FirstHop.ConsEgress) // @ fold p.d.validResult(processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, false) + // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) return processResult{EgressID: ohp.FirstHop.ConsEgress, OutConn: c, OutPkt: p.rawPkt}, - nil /*@ , false @*/ + nil /*@ , false, reveal absIO_val(dp, respr.OutPkt, respr.EgressID) @*/ } // TODO parameter problem invalid interface // @ establishCannotRoute() // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WithCtx(cannotRoute, "type", "ohp", - "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false @*/ + "egress", ohp.FirstHop.ConsEgress, "consDir", ohp.Info.ConsDir) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // OHP entering our IA @@ -3020,7 +3519,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad destination IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false @*/ + "localIA", p.d.localIA, "dstIA", s.DstIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ p.d.getNeighborIAs() neighborIA := p.d.neighborIAs[p.ingressID] @@ -3030,7 +3529,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.d.validResult(processResult{}, false) return processResult{}, serrors.WrapStr("bad source IA", cannotRoute, "type", "ohp", "ingress", p.ingressID, - "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false @*/ + "neighborIA", neighborIA, "srcIA", s.SrcIA) /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ unfold s.Path.Mem(ubPath) @@ -3054,7 +3553,7 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ fold p.scionLayer.Mem(ubScionL) if err := updateSCIONLayer(p.rawPkt, &p.scionLayer /* s */, p.buffer); err != nil { // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ } // (VerifiedSCION) the parameter was changed from 's' to '&p.scionLayer' due to the // changes made to 'resolveLocalDst'. 
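absReturnErr, used in every error return of process() and processOHP(), is not itself defined in this diff. From its call sites and the postconditions (respr.OutPkt == nil) == (newAbsPkt == io.IO_val_Unit{}) and respr.OutPkt != nil ==> newAbsPkt == absIO_val(dp, respr.OutPkt, respr.EgressID), a plausible shape is the following sketch (permission preconditions omitted; the real definition may differ):

ghost
decreases
pure func absReturnErr(dp io.DataPlaneSpec, respr processResult) io.IO_val {
	// unit value when nothing is sent, abstract value of the output otherwise
	return respr.OutPkt == nil ?
		io.IO_val(io.IO_val_Unit{}) :
		absIO_val(dp, respr.OutPkt, respr.EgressID)
}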
@@ -3064,12 +3563,13 @@ func (p *scionPacketProcessor) processOHP() (respr processResult, reserr error / // @ apply acc(a.Mem(), R15) --* acc(sl.AbsSlice_Bytes(ubScionL, 0, len(ubScionL)), R15) // @ } // @ fold p.d.validResult(processResult{}, false) - return processResult{}, err /*@ , false @*/ + return processResult{}, err /*@ , false, absReturnErr(dp, processResult{}) @*/ } // @ p.d.getInternal() // @ assert p.d.internal != nil ==> acc(p.d.internal.Mem(), _) // @ fold p.d.validResult(processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, addrAliases) - return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases @*/ + // @ TemporaryAssumeForIO(!slayers.IsSupportedPkt(p.rawPkt)) + return processResult{OutConn: p.d.internal, OutAddr: a, OutPkt: p.rawPkt}, nil /*@ , addrAliases, reveal absIO_val(dp, respr.OutPkt, 0) @*/ } // @ requires acc(d.Mem(), _) @@ -3427,7 +3927,7 @@ func (p *scionPacketProcessor) prepareSCMP( if infoField.ConsDir { hopField := /*@ unfolding acc(revPath.HopFields[revPath.PathMeta.CurrHF].Mem(), _) in @*/ revPath.HopFields[revPath.PathMeta.CurrHF] - infoField.UpdateSegID(hopField.Mac) + infoField.UpdateSegID(hopField.Mac /*@, hopField.ToIO_HF() @*/) } // @ fold revPath.Base.Mem() // @ fold revPath.Mem(rawPath) @@ -3532,6 +4032,11 @@ func (p *scionPacketProcessor) prepareSCMP( // @ ensures reterr == nil && 0 <= idx ==> retl === opts[idx] // @ ensures reterr == nil ==> retl != nil // @ ensures reterr == nil ==> base.Mem(data) +// @ ensures reterr == nil && typeOf(base.GetPath(data)) == *scion.Raw ==> +// @ slayers.ValidPktMetaHdr(data) +// @ ensures reterr == nil && typeOf(base.GetPath(data)) == *scion.Raw ==> +// @ base.EqAbsHeader(data) +// @ ensures reterr == nil ==> base.EqPathType(data) // @ ensures forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> // @ (processed[i] ==> (0 <= offsets[i].start && offsets[i].start <= offsets[i].end && offsets[i].end <= len(data))) // @ ensures reterr == nil ==> forall i int :: {&opts[i]}{processed[i]} 0 <= i && i < len(opts) ==> @@ -3544,7 +4049,9 @@ func (p *scionPacketProcessor) prepareSCMP( // @ ensures reterr != nil ==> (forall i int :: { &opts[i] } 0 <= i && i < len(opts) ==> opts[i].NonInitMem()) // @ ensures reterr != nil ==> reterr.ErrorMem() // @ decreases -func decodeLayers(data []byte, base gopacket.DecodingLayer, +// (VerifiedSCION) originally, `base` was declared with type `gopacket.DecodingLayer`. This is unnecessarily complicated for a private function +// that is only called once with a parameter of type `*SCION`, and leads to more annoying post-conditions. 
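One consequence of giving `base` the concrete type *slayers.SCION: the running "last decoded layer" can no longer be bound with `last := base`, because type inference would give it the type *slayers.SCION rather than gopacket.DecodingLayer, and later assignments from opts would not compile. The body below therefore declares the interface type explicitly; a minimal illustration:

var base *slayers.SCION = &slayers.SCION{}
var last gopacket.DecodingLayer = base // `last := base` would infer *slayers.SCION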
+func decodeLayers(data []byte, base *slayers.SCION, opts ...gopacket.DecodingLayer) (retl gopacket.DecodingLayer, reterr error /*@ , ghost processed seq[bool], ghost offsets seq[offsetPair], ghost idx int @*/) { // @ processed = seqs.NewSeqBool(len(opts)) @@ -3554,15 +4061,20 @@ func decodeLayers(data []byte, base gopacket.DecodingLayer, if err := base.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { return nil, err /*@ , processed, offsets, idx @*/ } - last := base + var last gopacket.DecodingLayer = base optsSlice := ([](gopacket.DecodingLayer))(opts) // @ ghost oldData := data // @ ghost oldStart := 0 // @ ghost oldEnd := len(data) - // @ invariant acc(sl.AbsSlice_Bytes(oldData, 0, len(oldData)), R40) + // @ invariant acc(sl.AbsSlice_Bytes(oldData, 0, len(oldData)), R39) // @ invariant base.Mem(oldData) + // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> + // @ slayers.ValidPktMetaHdr(oldData) + // @ invariant typeOf(base.GetPath(oldData)) == *scion.Raw ==> + // @ base.EqAbsHeader(oldData) + // @ invariant base.EqPathType(oldData) // @ invariant 0 < len(opts) ==> 0 <= i0 && i0 <= len(opts) // @ invariant forall i int :: {&opts[i]} 0 <= i && i < len(opts) ==> acc(&opts[i], R10) // @ invariant forall i, j int :: {&opts[i], &opts[j]} 0 <= i && i < j && j < len(opts) ==> opts[i] !== opts[j] diff --git a/router/io-spec-abstract-transitions.gobra b/router/io-spec-abstract-transitions.gobra new file mode 100644 index 000000000..afd145aa0 --- /dev/null +++ b/router/io-spec-abstract-transitions.gobra @@ -0,0 +1,235 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +package router + +import ( + "sync" + "github.com/scionproto/scion/pkg/slayers/path" + "github.com/scionproto/scion/pkg/slayers" + io "verification/io" + sl "github.com/scionproto/scion/verification/utils/slices" + . "verification/utils/definitions" +) + +ghost +opaque +requires len(oldPkt.CurrSeg.Future) > 0 +ensures len(newPkt.CurrSeg.Future) > 0 +ensures len(newPkt.CurrSeg.Future) == len(oldPkt.CurrSeg.Future) +decreases +pure func AbsUpdateNonConsDirIngressSegID(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs]) (newPkt io.IO_pkt2) { + return ingressID == none[io.IO_ifs] ? oldPkt : io.IO_pkt2( + io.IO_Packet2{ + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg}) +} + +ghost +opaque +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsValidateIngressIDConstraint(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { + return let currseg := pkt.CurrSeg in + ingressID != none[io.IO_ifs] ==> + ingressID == (currseg.ConsDir ? 
currseg.Future[0].InIF2 : currseg.Future[0].EgIF2) +} + +ghost +opaque +requires pkt.RightSeg != none[io.IO_seg2] +requires len(get(pkt.RightSeg).Past) > 0 +decreases +pure func AbsValidateIngressIDConstraintXover(pkt io.IO_pkt2, ingressID option[io.IO_ifs]) bool { + return let rightseg := get(pkt.RightSeg) in + ingressID != none[io.IO_ifs] ==> + ingressID == (rightseg.ConsDir ? rightseg.Past[0].InIF2 : rightseg.Past[0].EgIF2) +} + +ghost +opaque +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsEgressInterfaceConstraint(pkt io.IO_pkt2, egressID option[io.IO_ifs]) bool { + return let currseg := pkt.CurrSeg in + egressID == (currseg.ConsDir ? currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) +} + +ghost +opaque +requires dp.Valid() +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsValidateEgressIDConstraint(pkt io.IO_pkt2, enter bool, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let nextIf := (currseg.ConsDir ? currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) in + (enter ==> dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), currseg.Future[0])) && + nextIf != none[io.IO_ifs] && + (get(nextIf) in domain(dp.GetNeighborIAs())) +} + +ghost +opaque +requires len(oldPkt.CurrSeg.Future) > 0 +ensures len(newPkt.CurrSeg.Future) >= 0 +decreases +pure func AbsProcessEgress(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_pkt2( + io.IO_Packet2{ + io.establishGuardTraversedsegInc(oldPkt.CurrSeg, oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg}) +} + +ghost +opaque +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(oldPkt.CurrSeg.Future) == 1 +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires len(get(oldPkt.LeftSeg).History) == 0 +ensures len(newPkt.CurrSeg.Future) > 0 +ensures newPkt.RightSeg != none[io.IO_seg2] +ensures len(get(newPkt.RightSeg).Past) > 0 +decreases +pure func AbsDoXover(oldPkt io.IO_pkt2) (newPkt io.IO_pkt2) { + return io.IO_pkt2( + io.IO_Packet2{ + get(oldPkt.LeftSeg), + oldPkt.MidSeg, + oldPkt.RightSeg, + some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, false))}) +} + +ghost +opaque +requires dp.Valid() +requires len(pkt.CurrSeg.Future) > 0 +requires pkt.RightSeg != none[io.IO_seg2] +requires len(get(pkt.RightSeg).Past) > 0 +decreases +pure func AbsValidateEgressIDConstraintXover(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let rightseg := get(pkt.RightSeg) in + let nextIf := (currseg.ConsDir ? 
currseg.Future[0].EgIF2 : currseg.Future[0].InIF2) in + dp.xover_up2down2_link_type_dir(dp.Asid(), rightseg.ConsDir, rightseg.Past[0], + currseg.ConsDir, currseg.Future[0]) && + nextIf != none[io.IO_ifs] && + (get(nextIf) in domain(dp.GetNeighborIAs())) +} + +ghost +opaque +requires dp.Valid() +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func AbsVerifyCurrentMACConstraint(pkt io.IO_pkt2, dp io.DataPlaneSpec) bool { + return let currseg := pkt.CurrSeg in + let d := currseg.ConsDir in + let ts := currseg.AInfo in + let hf := currseg.Future[0] in + let uinfo := currseg.UInfo in + dp.hf_valid(d, ts, uinfo, hf) +} + +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires egressID == none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(newPkt, dp) +requires len(newPkt.CurrSeg.Future) == 1 || AbsValidateEgressIDConstraint(newPkt, true, dp) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures dp.Valid() +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func InternalEnterEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(newPkt, dp) + if(len(newPkt.CurrSeg.Future) != 1) { + reveal AbsValidateEgressIDConstraint(newPkt, true, dp) + } + AtomicEnter(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) +} + +ghost +requires dp.Valid() +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) +requires AbsValidateEgressIDConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), (ingressID != none[io.IO_ifs]), dp) +requires AbsEgressInterfaceConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), egressID) +requires newPkt == AbsProcessEgress(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures dp.Valid() +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func ExternalEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal dp.Valid() + nextPkt := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(nextPkt, dp) + reveal AbsEgressInterfaceConstraint(nextPkt, egressID) + reveal AbsValidateEgressIDConstraint(nextPkt, (ingressID != none[io.IO_ifs]), dp) + reveal AbsProcessEgress(nextPkt) + if(ingressID == none[io.IO_ifs]){ + AtomicExit(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) + } else { + AtomicEnter(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) + } +} + +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires 
AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires AbsVerifyCurrentMACConstraint(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID), dp) +requires len(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).CurrSeg.Future) == 1 +requires AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg != none[io.IO_seg2] +requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).Future) > 0 +requires len(get(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID).LeftSeg).History) == 0 +requires AbsVerifyCurrentMACConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp) +requires AbsValidateEgressIDConstraintXover(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), dp) +requires AbsEgressInterfaceConstraint(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)), egressID) +requires egressID == none[io.IO_ifs] ==> newPkt == AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID)) +requires egressID != none[io.IO_ifs] ==> newPkt == AbsProcessEgress(AbsDoXover(AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID))) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures dp.Valid() +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases +func XoverEvent(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + reveal dp.Valid() + intermediatePkt1 := reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + intermediatePkt2 := reveal AbsDoXover(intermediatePkt1) + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsVerifyCurrentMACConstraint(intermediatePkt1, dp) + reveal AbsVerifyCurrentMACConstraint(intermediatePkt2, dp) + reveal AbsValidateEgressIDConstraintXover(intermediatePkt2, dp) + reveal AbsEgressInterfaceConstraint(intermediatePkt2, egressID) + if(egressID != none[io.IO_ifs]){ + reveal AbsProcessEgress(intermediatePkt2) + } + AtomicXoverUp2Down(oldPkt, ingressID, newPkt, egressID, ioLock, ioSharedArg, dp) +} diff --git a/router/io-spec-atomic-events.gobra b/router/io-spec-atomic-events.gobra new file mode 100644 index 000000000..b93ae6ede --- /dev/null +++ b/router/io-spec-atomic-events.gobra @@ -0,0 +1,160 @@ +// Copyright 2022 ETH Zurich +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +gobra + +// This file contains the definition of operations that perform the atomic transitions of state +// in the IO spec. They all take a *sync.Mutex, which acts as a logical invariant, because Gobra +// does not support invariants natively. As such, we can only get access to the invariants if we +// first lock the mutex, which is a blocking operation. Even though all these operations are +// terminating, Gobra cannot currently prove this and thus, we assume termination for all methods +// in this file. 
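A minimal Go analogue of this lock-as-invariant discipline, with a hypothetical shared output buffer standing in for the IO-spec state (the Gobra operations below additionally thread a ghost place token through each IO event):

import "sync"

// invariant: obuf is only read or written while mu is held
type sharedIO struct {
	mu   sync.Mutex
	obuf map[uint16][][]byte
}

// the shape shared by AtomicEnter, AtomicExit, and AtomicXoverUp2Down:
// lock to obtain the invariant, perform one event, restore the invariant
func (s *sharedIO) atomicEnter(egress uint16, pkt []byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.obuf[egress] = append(s.obuf[egress], pkt)
}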
+ +package router + +import ( + "sync" + io "verification/io" +) + +ghost +requires dp.Valid() +requires ingressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.dp2_enter_guard( + oldPkt, + oldPkt.CurrSeg, + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + dp.Asid(), + oldPkt.CurrSeg.Future[0], + get(ingressID), + oldPkt.CurrSeg.Future[1:]) +requires dp.dp3s_forward( + io.IO_pkt2( + io.IO_Packet2{ + io.establishGuardTraversedseg(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir), + oldPkt.LeftSeg, + oldPkt.MidSeg, + oldPkt.RightSeg}), + newPkt, + egressID) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicEnter(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val1{oldPkt, get(ingressID), newPkt, egressID}) + assert dp.dp3s_iospec_bio3s_enter_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_enter(s, t) + io.TriggerBodyIoEnter(pkt_internal) + tN := io.CBio_IN_bio3s_enter_T(t, pkt_internal) + io.Enter(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} + +ghost +requires dp.Valid() +requires ingressID == none[io.IO_ifs] +requires egressID != none[io.IO_ifs] +requires len(oldPkt.CurrSeg.Future) > 0 +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.dp3s_forward_ext(oldPkt, newPkt, get(egressID)) +preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>; +ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt) +decreases _ +func AtomicExit(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) { + ghost ioLock.Lock() + unfold SharedInv!< dp, ioSharedArg !>() + t, s := *ioSharedArg.Place, *ioSharedArg.State + ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt) + ghost pkt_internal := io.IO_val(io.IO_Internal_val2{oldPkt, newPkt, get(egressID)}) + assert dp.dp3s_iospec_bio3s_exit_guard(s, t, pkt_internal) + unfold dp.dp3s_iospec_ordered(s, t) + unfold dp.dp3s_iospec_bio3s_exit(s, t) + io.TriggerBodyIoExit(pkt_internal) + tN := io.dp3s_iospec_bio3s_exit_T(t, pkt_internal) + io.Exit(t, pkt_internal) //Event + UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt) + ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt) + ghost *ioSharedArg.Place = tN + fold SharedInv!< dp, ioSharedArg !>() + ghost ioLock.Unlock() +} + +ghost +requires dp.Valid() +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(oldPkt.CurrSeg.Future) > 0 +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires ingressID != none[io.IO_ifs] +requires ElemWitness(ioSharedArg.IBufY, ingressID, oldPkt) +requires dp.xover_up2down2_link_type_dir( + dp.Asid(), + oldPkt.CurrSeg.ConsDir, + oldPkt.CurrSeg.Future[0], + get(oldPkt.LeftSeg).ConsDir, + 
get(oldPkt.LeftSeg).Future[0])
+requires dp.dp2_xover_common_guard(
+	oldPkt,
+	oldPkt.CurrSeg,
+	get(oldPkt.LeftSeg),
+	io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir),
+	io.IO_pkt2(io.IO_Packet2{
+		get(oldPkt.LeftSeg),
+		oldPkt.MidSeg,
+		oldPkt.RightSeg,
+		some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir))}),
+	oldPkt.CurrSeg.Future[0],
+	get(oldPkt.LeftSeg).Future[0],
+	get(oldPkt.LeftSeg).Future[1:],
+	dp.Asid(),
+	get(ingressID))
+requires dp.dp3s_forward_xover(
+	io.IO_pkt2(io.IO_Packet2{
+		get(oldPkt.LeftSeg),
+		oldPkt.MidSeg,
+		oldPkt.RightSeg,
+		some(io.establishGuardTraversedsegInc(oldPkt.CurrSeg, !oldPkt.CurrSeg.ConsDir))}),
+	newPkt,
+	egressID)
+preserves acc(ioLock.LockP(), _) && ioLock.LockInv() == SharedInv!< dp, ioSharedArg !>;
+ensures ElemWitness(ioSharedArg.OBufY, egressID, newPkt)
+decreases _
+func AtomicXoverUp2Down(oldPkt io.IO_pkt2, ingressID option[io.IO_ifs], newPkt io.IO_pkt2, egressID option[io.IO_ifs], ioLock *sync.Mutex, ioSharedArg SharedArg, dp io.DataPlaneSpec) {
+	ghost ioLock.Lock()
+	unfold SharedInv!< dp, ioSharedArg !>()
+	t, s := *ioSharedArg.Place, *ioSharedArg.State
+	ApplyElemWitness(s.ibuf, ioSharedArg.IBufY, ingressID, oldPkt)
+	ghost pkt_internal := io.IO_val(io.IO_Internal_val1{oldPkt, get(ingressID), newPkt, egressID})
+	assert dp.dp3s_iospec_bio3s_xover_up2down_guard(s, t, pkt_internal)
+	unfold dp.dp3s_iospec_ordered(s, t)
+	unfold dp.dp3s_iospec_bio3s_xover_up2down(s, t)
+	io.TriggerBodyIoXoverUp2Down(pkt_internal)
+	tN := io.dp3s_iospec_bio3s_xover_up2down_T(t, pkt_internal)
+	io.Xover_up2down(t, pkt_internal) //Event
+	UpdateElemWitness(s.obuf, ioSharedArg.OBufY, egressID, newPkt)
+	ghost *ioSharedArg.State = io.dp3s_add_obuf(s, egressID, newPkt)
+	ghost *ioSharedArg.Place = tN
+	fold SharedInv!< dp, ioSharedArg !>()
+	ghost ioLock.Unlock()
+}
\ No newline at end of file
diff --git a/router/io-spec-non-proven-lemmas.gobra b/router/io-spec-non-proven-lemmas.gobra
new file mode 100644
index 000000000..6edcde280
--- /dev/null
+++ b/router/io-spec-non-proven-lemmas.gobra
@@ -0,0 +1,261 @@
+// Copyright 2022 ETH Zurich
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package router
+
+import (
+	"sync"
+	"github.com/scionproto/scion/pkg/slayers/path"
+	"github.com/scionproto/scion/pkg/slayers/path/scion"
+	"github.com/scionproto/scion/pkg/slayers"
+	"verification/dependencies/encoding/binary"
+	io "verification/io"
+	sl "github.com/scionproto/scion/verification/utils/slices"
+	.
"verification/utils/definitions" +) + +ghost +preserves dp.Valid() +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +ensures slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw) ==> + absIO_val(dp, raw, ingressID).isIO_val_Pkt2 && + absIO_val(dp, raw, ingressID).IO_val_Pkt2_2 == absPkt(dp, raw) && + len(absPkt(dp, raw).CurrSeg.Future) > 0 +decreases +func absIO_valLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16) { + if(slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)){ + absIO := reveal absIO_val(dp, raw, ingressID) + assert absIO.isIO_val_Pkt2 + assert absIO_val(dp, raw, ingressID).IO_val_Pkt2_2 == absPkt(dp, raw) + absPktFutureLemma(dp, raw) + } +} + +ghost +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires slayers.ValidPktMetaHdr(raw) +ensures dp.Valid() +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +ensures slayers.ValidPktMetaHdr(raw) +ensures len(absPkt(dp, raw).CurrSeg.Future) > 0 +decreases +func absPktFutureLemma(dp io.DataPlaneSpec, raw []byte) { + reveal slayers.ValidPktMetaHdr(raw) + headerOffset := slayers.GetAddressOffset(raw) + assert forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k] + hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) + metaHdr := scion.DecodedFrom(hdr) + currINFIdx := int(metaHdr.CurrINF) + currHFIdx := int(metaHdr.CurrHF) + seg1Len := int(metaHdr.SegLen[0]) + seg2Len := int(metaHdr.SegLen[1]) + seg3Len := int(metaHdr.SegLen[2]) + segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) + numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) + offset := scion.HopFieldOffset(numINF, 0, headerOffset) + pkt := reveal absPkt(dp, raw) + assert pkt.CurrSeg == reveal scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset) + assert len(pkt.CurrSeg.Future) > 0 +} + +ghost +requires len(oldPkt.CurrSeg.Future) > 0 +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires newPkt == AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) +ensures AbsValidateIngressIDConstraint(newPkt, ingressID) +decreases +func AbsValidateIngressIDLemma(oldPkt io.IO_pkt2, newPkt io.IO_pkt2, ingressID option[io.IO_ifs]) { + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsUpdateNonConsDirIngressSegID(oldPkt, ingressID) + reveal AbsValidateIngressIDConstraint(newPkt, ingressID) +} + +ghost +requires len(oldPkt.CurrSeg.Future) == 1 +requires oldPkt.LeftSeg != none[io.IO_seg2] +requires len(get(oldPkt.LeftSeg).Future) > 0 +requires len(get(oldPkt.LeftSeg).History) == 0 +requires AbsValidateIngressIDConstraint(oldPkt, ingressID) +requires newPkt == AbsDoXover(oldPkt) +ensures AbsValidateIngressIDConstraintXover(newPkt, ingressID) +decreases +func AbsValidateIngressIDXoverLemma(oldPkt io.IO_pkt2, newPkt io.IO_pkt2, ingressID option[io.IO_ifs]) { + reveal AbsValidateIngressIDConstraint(oldPkt, ingressID) + reveal AbsDoXover(oldPkt) + reveal AbsValidateIngressIDConstraintXover(newPkt, ingressID) +} + +ghost +opaque +requires acc(p.scionLayer.Mem(ub), R50) +requires acc(&p.d, R55) && acc(p.d.Mem(), _) +requires acc(&p.ingressID, R55) +decreases +pure func (p *scionPacketProcessor) DstIsLocalIngressID(ub []byte) bool { + return (unfolding 
acc(p.scionLayer.Mem(ub), R50) in
+	(unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in
+	p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==> p.ingressID != 0
+}
+
+ghost
+opaque
+requires acc(p.scionLayer.Mem(ub), R50)
+requires acc(&p.d, R55) && acc(p.d.Mem(), _)
+requires acc(&p.ingressID, R55)
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
+requires slayers.ValidPktMetaHdr(ub)
+requires dp.Valid()
+decreases
+pure func (p *scionPacketProcessor) LastHopLen(ub []byte, dp io.DataPlaneSpec) bool {
+	return (unfolding acc(p.scionLayer.Mem(ub), R50) in
+		(unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in
+		p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA)) ==>
+		len(absPkt(dp, ub).CurrSeg.Future) == 1
+}
+
+// TODO: Does not work with --disableNL --unsafeWildcardOptimization
+ghost
+requires acc(p.scionLayer.Mem(ub), R50)
+requires acc(&p.d, R55) && acc(p.d.Mem(), _)
+requires acc(&p.ingressID, R55)
+requires dp.Valid()
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
+requires slayers.ValidPktMetaHdr(ub)
+requires p.DstIsLocalIngressID(ub)
+requires p.LastHopLen(ub, dp)
+requires (unfolding acc(p.scionLayer.Mem(ub), R50) in
+	(unfolding acc(p.scionLayer.HeaderMem(ub[slayers.CmnHdrLen:]), R55) in
+	p.scionLayer.DstIA) == (unfolding acc(p.d.Mem(), _) in p.d.localIA))
+ensures acc(p.scionLayer.Mem(ub), R50)
+ensures acc(&p.d, R55) && acc(p.d.Mem(), _)
+ensures acc(&p.ingressID, R55)
+ensures dp.Valid()
+ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
+ensures slayers.ValidPktMetaHdr(ub)
+ensures p.ingressID != 0
+ensures len(absPkt(dp, ub).CurrSeg.Future) == 1
+decreases
+func (p* scionPacketProcessor) LocalDstLemma(ub []byte, dp io.DataPlaneSpec) {
+	reveal p.DstIsLocalIngressID(ub)
+	reveal p.LastHopLen(ub, dp)
+}
+
+ghost
+requires acc(p.scionLayer.Mem(ub), R55)
+requires acc(&p.path, R55) && p.path == p.scionLayer.GetPath(ub)
+decreases
+pure func (p* scionPacketProcessor) GetIsXoverSpec(ub []byte) bool {
+	return let ubPath := p.scionLayer.UBPath(ub) in
+		unfolding acc(p.scionLayer.Mem(ub), R55) in
+		p.path.GetIsXoverSpec(ubPath)
+}
+
+// TODO prove
+ghost
+requires 0 <= start && start <= end && end <= len(ub)
+requires acc(p.scionLayer.Mem(ub), R55)
+requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55)
+requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55)
+requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55)
+requires p.path === p.scionLayer.GetPath(ub)
+requires dp.Valid()
+requires slayers.ValidPktMetaHdr(ub)
+requires start == p.scionLayer.PathStartIdx(ub)
+requires end == p.scionLayer.PathEndIdx(ub)
+requires p.scionLayer.EqAbsHeader(ub)
+ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55)
+ensures acc(p.scionLayer.Mem(ub), R55)
+ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55)
+ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55)
+ensures dp.Valid()
+ensures slayers.ValidPktMetaHdr(ub)
+ensures start == p.scionLayer.PathStartIdx(ub)
+ensures end == p.scionLayer.PathEndIdx(ub)
+ensures scion.validPktMetaHdr(ub[start:end])
+ensures p.path.EqAbsHeader(ub[start:end])
+ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end])
+decreases
+func (p* scionPacketProcessor) AbsPktToSubSliceAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec)
+/* {
+	reveal validPktMetaHdr(ub)
+	reveal p.scionLayer.EqAbsHeader(ub)
+	unfold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56)
+	unfold acc(sl.AbsSlice_Bytes(ub[start:end], 0,
len(ub[start:end])), R56) + assert reveal scion.validPktMetaHdr(ub[start:end]) + unfold acc(p.scionLayer.Mem(ub), R56) + assert p.scionLayer.Path.(*scion.Raw).EqAbsHeader(ub[start:end]) + assert p.path.EqAbsHeader(ub[start:end]) + fold acc(p.scionLayer.Mem(ub), R56) + assert reveal absPkt(dp, ub) == reveal p.path.absPkt(dp, ub[start:end]) + fold acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R56) + fold acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R56) +}*/ + +// TODO prove +ghost +requires 0 <= start && start <= end && end <= len(ub) +requires acc(p.scionLayer.Mem(ub), R55) +requires acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +requires acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +requires acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +requires p.path === p.scionLayer.GetPath(ub) +requires dp.Valid() +requires scion.validPktMetaHdr(ub[start:end]) +requires start == p.scionLayer.PathStartIdx(ub) +requires end == p.scionLayer.PathEndIdx(ub) +requires p.path.EqAbsHeader(ub[start:end]) +ensures acc(sl.AbsSlice_Bytes(ub, 0, len(ub)), R55) +ensures acc(p.scionLayer.Mem(ub), R55) +ensures acc(sl.AbsSlice_Bytes(ub[start:end], 0, len(ub[start:end])), R55) +ensures acc(&p.path, R55) && acc(p.path.Mem(ub[start:end]), R55) +ensures dp.Valid() +ensures slayers.ValidPktMetaHdr(ub) +ensures start == p.scionLayer.PathStartIdx(ub) +ensures end == p.scionLayer.PathEndIdx(ub) +ensures scion.validPktMetaHdr(ub[start:end]) +ensures p.scionLayer.EqAbsHeader(ub) +ensures absPkt(dp, ub) == p.path.absPkt(dp, ub[start:end]) +decreases +func (p* scionPacketProcessor) SubSliceAbsPktToAbsPkt(ub []byte, start int, end int, dp io.DataPlaneSpec) + +ghost +opaque +requires acc(&p.hopField, R55) +requires len(pkt.CurrSeg.Future) > 0 +decreases +pure func (p* scionPacketProcessor) EqAbsHopField(pkt io.IO_pkt2) bool { + return let absHop := p.hopField.ToIO_HF() in + let currHF := pkt.CurrSeg.Future[0] in + absHop.InIF2 == currHF.InIF2 && + absHop.EgIF2 == currHF.EgIF2 && + absHop.HVF == currHF.HVF +} + +ghost +opaque +requires acc(&p.infoField, R55) +decreases +pure func (p* scionPacketProcessor) EqAbsInfoField(pkt io.IO_pkt2) bool { + return let absInf := p.infoField.ToIntermediateAbsInfoField() in + let currseg := pkt.CurrSeg in + absInf.AInfo == currseg.AInfo && + absInf.UInfo == currseg.UInfo && + absInf.ConsDir == currseg.ConsDir && + absInf.Peer == currseg.Peer +} \ No newline at end of file diff --git a/router/io-spec.gobra b/router/io-spec.gobra index 6de64f1bb..dc1377a00 100644 --- a/router/io-spec.gobra +++ b/router/io-spec.gobra @@ -17,507 +17,48 @@ package router import ( - sl "github.com/scionproto/scion/verification/utils/slices" - "github.com/scionproto/scion/verification/io" - "github.com/scionproto/scion/verification/dependencies/encoding/binary" + "github.com/scionproto/scion/pkg/slayers" "github.com/scionproto/scion/pkg/slayers/path" "github.com/scionproto/scion/pkg/slayers/path/scion" "github.com/scionproto/scion/private/topology" -) - -ghost -decreases -pure func numInfoFields(seg1Len int, seg2Len int, seg3Len int) int { - return seg3Len > 0 ? 3 : (seg2Len > 0 ? 
2 : 1) -} - -ghost -decreases -pure func hopFieldOffset(numINF int, currHF int) int { - return path.InfoFieldOffset(numINF) + path.HopLen * currHF -} - -ghost -decreases -pure func pktLen(seg1Len int, seg2Len int, seg3Len int) int { - return hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + - path.HopLen * (seg1Len + seg2Len + seg3Len) -} - - -ghost -decreases -pure func lengthOfCurrSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) int { - return seg1Len > currHF ? seg1Len : ((seg1Len + seg2Len) > currHF ? seg2Len : seg3Len) -} - -ghost -requires 0 <= currHF -ensures res <= currHF -decreases -pure func lengthOfPrevSeg(currHF int, seg1Len int, seg2Len int, seg3Len int) (res int) { - return seg1Len > currHF ? 0 : ((seg1Len + seg2Len) > currHF ? seg1Len : seg1Len + seg2Len) -} - -// returns the ASid of a hopfield -ghost -requires 1 <= numINF -requires 0 <= currHFIdx -requires hopFieldOffset(numINF, currHFIdx) + path.HopLen <= len(raw) -requires dp.Valid() -requires let idx := hopFieldOffset(numINF, currHFIdx) in - acc(&raw[idx+2], _) && acc(&raw[idx+3], _) && acc(&raw[idx+4], _) && acc(&raw[idx+5], _) -decreases -pure func asidFromIfs( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - consDir bool, - asid io.IO_as) (res option[io.IO_as]) { - return let idx := hopFieldOffset(numINF, currHFIdx) in - let ifs := consDir ? binary.BigEndian.Uint16(raw[idx+4:idx+6]) : binary.BigEndian.Uint16(raw[idx+2:idx+4]) in - let asIfPair := io.AsIfsPair{asid, io.IO_ifs(ifs)} in - (asIfPair in domain(dp.GetLinks()) ? - some(dp.Lookup(asIfPair).asid) : none[io.IO_as]) -} - -// returns a list of ASids of hopfields that are before the current hopfield in a segment -ghost -requires 1 <= numINF -requires 0 <= prevSegLen && prevSegLen <= currHFIdx -requires hopFieldOffset(numINF, currHFIdx) + path.HopLen <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == currHFIdx - prevSegLen + 1 -decreases currHFIdx - prevSegLen -pure func asidsBefore( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - prevSegLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return let next_asid := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in asidFromIfs(dp, raw, numINF, currHFIdx, !consDir, asid)) in - match next_asid{ - case none[io.IO_as]: - none[seq[io.IO_as]] - default: - currHFIdx == prevSegLen ? 
some(seq[io.IO_as]{get(next_asid)}) : - let next_asid_seq := asidsBefore(dp, raw, numINF, currHFIdx-1, prevSegLen, consDir, get(next_asid)) in - match next_asid_seq{ - case none[seq[io.IO_as]]: - none[seq[io.IO_as]] - default: - some(get(next_asid_seq) ++ seq[io.IO_as]{get(next_asid)}) - } - } -} - -// returns a list of ASids of hopfields that are after the current hopfield in a segment -ghost -requires 1 <= numINF -requires 0 <= currHFIdx && currHFIdx < segLen -requires hopFieldOffset(numINF, segLen) <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == segLen - currHFIdx -decreases segLen - currHFIdx + 1 -pure func asidsAfter( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - segLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return let next_asid := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in asidFromIfs(dp, raw, numINF, currHFIdx, consDir, asid)) in - match next_asid{ - case none[io.IO_as]: - none[seq[io.IO_as]] - default: - currHFIdx == segLen - 1 ? some(seq[io.IO_as]{get(next_asid)}) : - let next_asid_seq := asidsAfter(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(next_asid)) in - match next_asid_seq{ - case none[seq[io.IO_as]]: - none[seq[io.IO_as]] - default: - some(seq[io.IO_as]{get(next_asid)} ++ get(next_asid_seq)) - } - } -} - -// returns a list of ASids of hopfields for CurrSeg in the abstract packet -ghost -requires 1 <= numINF -requires 0 <= prevSegLen && prevSegLen <= currHFIdx -requires currHFIdx < segLen -requires hopFieldOffset(numINF, segLen) <= len(raw) -requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures res != none[seq[io.IO_as]] ==> len(get(res)) == segLen - prevSegLen -decreases -pure func asidForCurrSeg( - dp io.DataPlaneSpec, - raw []byte, - numINF int, - currHFIdx int, - segLen int, - prevSegLen int, - consDir bool, - asid io.IO_as) (res option[seq[io.IO_as]]) { - return segLen == 0 ? some(seq[io.IO_as]{}) : - let left := asidsBefore(dp, raw, numINF, currHFIdx, prevSegLen, consDir, asid) in - let right := asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) in - (left == none[seq[io.IO_as]] || right == none[seq[io.IO_as]]) ? - none[seq[io.IO_as]] : - some(get(left) ++ get(right)[1:]) -} - -// returns a list of ASids of hopfields for LeftSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires 1 <= currINFIdx && currINFIdx < 4 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func asidsForLeftSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as) (res option[seq[io.IO_as]]) { - return let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - (currINFIdx == 1 && seg2Len > 0) ? - asidForCurrSeg(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir, asid) : - (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? 
- asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir, asid) : - some(seq[io.IO_as]{}) -} - -// returns a list of ASids of hopfields for RightSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires -1 <= currINFIdx && currINFIdx < 2 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures (currINFIdx == 0 && res != none[seq[io.IO_as]]) ==> len(get(res)) == seg1Len -ensures (currINFIdx == 1 && seg2Len > 0 && res != none[seq[io.IO_as]]) ==> len(get(res)) == seg2Len -decreases -pure func asidsForRightSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as) (res option[seq[io.IO_as]]) { - return (currINFIdx == 1 && seg2Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir, asid) : - (currINFIdx == 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir, asid) : - some(seq[io.IO_as]{}) -} - -// returns a list of ASids of hopfields for MidSeg in the abstract packet -ghost -requires dp.Valid() -requires 1 <= numINF -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= len(raw) -requires currINFIdx <= numINF + 1 -requires 2 <= currINFIdx && currINFIdx < 5 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -requires (currINFIdx == 4 && seg2Len > 0) ==> asid != none[io.IO_as] -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> asid != none[io.IO_as] -decreases -pure func asidsForMidSeg(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid option[io.IO_as]) (res option[seq[io.IO_as]]) { - return (currINFIdx == 4 && seg2Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, 1) in - asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir, get(asid)) : - (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, 2) in - asidForCurrSeg(dp, raw, numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir, get(asid)) : - some(seq[io.IO_as]{}) -} - -ghost -requires idx + path.HopLen <= len(raw) -requires 0 <= idx -requires acc(&raw[idx+2], _) && acc(&raw[idx+3], _) && acc(&raw[idx+4], _) && acc(&raw[idx+5], _) -ensures len(res.HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases -pure func hopField(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO_as, ainfo io.IO_ainfo) (res io.IO_HF) { - return let inif2 := binary.BigEndian.Uint16(raw[idx+2:idx+4]) in - let egif2 := binary.BigEndian.Uint16(raw[idx+4:idx+6]) in - let op_inif2 := inif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(inif2)) in - let op_egif2 := egif2 == 0 ? 
none[io.IO_ifs] : some(io.IO_ifs(egif2)) in - let ts := io.IO_msgterm(io.MsgTerm_Num{ainfo}) in - let l := io.IO_msgterm(io.MsgTerm_L{seq[io.IO_msgterm]{ts, io.if2term(op_inif2), io.if2term(op_egif2), - io.IO_msgterm(io.MsgTerm_FS{beta})}}) in - let hvf := io.mac(io.macKey(io.asidToKey(asid)), l) in - io.IO_HF(io.IO_HF_{ - InIF2 : op_inif2, - EgIF2 : op_egif2, - HVF : hvf, - }) -} - -ghost -requires 0 <= offset -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures len(res) == len(asid) - currHFIdx -ensures forall k int :: {res[k]} 0 <= k && k < len(res) ==> - len(res[k].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases len(asid) - currHFIdx -pure func hopFieldsConsDir( - raw []byte, - offset int, - currHFIdx int, - beta set[io.IO_msgterm], - asid seq[io.IO_as], - ainfo io.IO_ainfo) (res seq[io.IO_HF]) { - return currHFIdx == len(asid) ? seq[io.IO_HF]{} : - let hf := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)) in - seq[io.IO_HF]{hf} ++ hopFieldsConsDir(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf.HVF}), asid, ainfo) -} - -ghost -requires 0 <= offset -requires -1 <= currHFIdx && currHFIdx < len(asid) -requires offset + path.HopLen * currHFIdx + path.HopLen <= len(raw) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -ensures len(res) == currHFIdx + 1 -ensures forall k int :: {res[k]} 0 <= k && k < len(res) ==> - len(res[k].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 -decreases currHFIdx + 1 -pure func hopFieldsNotConsDir( - raw []byte, - offset int, - currHFIdx int, - beta set[io.IO_msgterm], - asid seq[io.IO_as], - ainfo io.IO_ainfo) (res seq[io.IO_HF]) { - return currHFIdx == -1 ? seq[io.IO_HF]{} : - let hf := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo)) in - hopFieldsNotConsDir(raw, offset, currHFIdx -1, (beta union set[io.IO_msgterm]{hf.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf} -} - -ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -decreases currHFIdx + 1 -pure func segPast(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_HF] { - return currHFIdx == -1 ? - seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segPast(hopfields, currHFIdx - 1) -} - -ghost -requires 0 <= currHFIdx && currHFIdx <= len(hopfields) -decreases len(hopfields) - currHFIdx -pure func segFuture(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_HF] { - return currHFIdx == len(hopfields) ? seq[io.IO_HF]{} : - seq[io.IO_HF]{hopfields[currHFIdx]} ++ segFuture(hopfields, currHFIdx + 1) -} - -ghost -requires -1 <= currHFIdx && currHFIdx < len(hopfields) -decreases currHFIdx + 1 -pure func segHistory(hopfields seq[io.IO_HF], currHFIdx int) seq[io.IO_ahi] { - return currHFIdx == -1 ? seq[io.IO_ahi]{} : - seq[io.IO_ahi]{hopfields[currHFIdx].Toab()} ++ segHistory(hopfields, currHFIdx - 1) -} - -ghost -requires 0 <= offset -requires 0 < len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func segment(raw []byte, - offset int, - currHFIdx int, - asid seq[io.IO_as], - ainfo io.IO_ainfo, - consDir bool, - peer bool) io.IO_seg2 { - return let hopfields := consDir ? 
- hopFieldsConsDir(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : - hopFieldsNotConsDir(raw, offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) in - let uinfo := uInfo(hopfields, currHFIdx, consDir) in - io.IO_seg2(io.IO_seg3_{ - AInfo :ainfo, - UInfo : uinfo, - ConsDir : consDir, - Peer : peer, - Past : segPast(hopfields, currHFIdx - 1), - Future : segFuture(hopfields, currHFIdx), - History : segHistory(hopfields, currHFIdx - 1), - }) -} - -ghost -requires path.InfoFieldOffset(currINFIdx) + path.InfoLen <= offset -requires 0 < len(asid) -requires offset + path.HopLen * len(asid) <= len(raw) -requires 0 <= currHFIdx && currHFIdx <= len(asid) -requires 0 <= currINFIdx && currINFIdx < 3 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func currSeg(raw []byte, offset int, currINFIdx int, currHFIdx int, asid seq[io.IO_as]) io.IO_seg3 { - return unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let ainfo := timestamp(raw, currINFIdx) in - let consDir := path.ConsDir(raw, currINFIdx) in - let peer := path.Peer(raw, currINFIdx) in - segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) -} - -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires 1 <= currINFIdx && currINFIdx < 4 -requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func leftSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 1 && seg2Len > 0) ? - some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) : - none[io.IO_seg3]) -} - -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires -1 <= currINFIdx && currINFIdx < 2 -requires (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg2Len -requires (currINFIdx == 0 && seg2Len > 0) ==> len(asid) == seg1Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func rightSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid)) : - (currINFIdx == 0 && seg2Len > 0) ? 
- some(currSeg(raw, offset, currINFIdx, seg1Len, asid)) : - none[io.IO_seg3] -} + "github.com/scionproto/scion/pkg/addr" + "golang.org/x/net/ipv4" -ghost -requires 0 < seg1Len -requires 0 <= seg2Len -requires 0 <= seg3Len -requires pktLen(seg1Len, seg2Len, seg3Len) <= len(raw) -requires 2 <= currINFIdx && currINFIdx < 5 -requires (currINFIdx == 4 && seg2Len > 0) ==> len(asid) == seg1Len -requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func midSeg( - raw []byte, - currINFIdx int, - seg1Len int, - seg2Len int, - seg3Len int, - asid seq[io.IO_as]) option[io.IO_seg3] { - return let offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) in - (currINFIdx == 4 && seg2Len > 0) ? - some(currSeg(raw, offset, 0, seg1Len, asid)) : - ((currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ? - some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) : - none[io.IO_seg3]) -} + "verification/dependencies/encoding/binary" + "verification/io" + sl "verification/utils/slices" + . "verification/utils/definitions" +) ghost +opaque requires dp.Valid() -requires len(raw) > 4 -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -requires unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let hdr := binary.BigEndian.Uint32(raw[0:4]) in - let metaHdr := scion.DecodedFrom(hdr) in - let seg1 := int(metaHdr.SegLen[0]) in - let seg2 := int(metaHdr.SegLen[1]) in - let seg3 := int(metaHdr.SegLen[2]) in - let base := scion.Base{metaHdr, - numInfoFields(seg1, seg2, seg3), - seg1 + seg2 + seg3} in - metaHdr.InBounds() && - 0 < metaHdr.SegLen[0] && - base.ValidCurrInfSpec() && - base.ValidCurrHfSpec() && - len(raw) >= pktLen(seg1, seg2, seg3) -decreases -pure func absPkt(dp io.DataPlaneSpec, raw []byte, asid io.IO_as) option[io.IO_pkt2] { - return let hdr := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in binary.BigEndian.Uint32(raw[0:4]) in +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) +requires slayers.ValidPktMetaHdr(raw) +decreases +pure func absPkt(dp io.DataPlaneSpec, raw []byte) (res io.IO_pkt2) { + return let _ := reveal slayers.ValidPktMetaHdr(raw) in + let headerOffset := slayers.GetAddressOffset(raw) in + let _ := Asserting(forall k int :: {&raw[headerOffset:headerOffset+scion.MetaLen][k]} 0 <= k && k < scion.MetaLen ==> + &raw[headerOffset:headerOffset+scion.MetaLen][k] == &raw[headerOffset + k]) in + let hdr := (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) in + binary.BigEndian.Uint32(raw[headerOffset:headerOffset+scion.MetaLen])) in let metaHdr := scion.DecodedFrom(hdr) in let currINFIdx := int(metaHdr.CurrINF) in let currHFIdx := int(metaHdr.CurrHF) in let seg1Len := int(metaHdr.SegLen[0]) in let seg2Len := int(metaHdr.SegLen[1]) in let seg3Len := int(metaHdr.SegLen[2]) in - let segLen := lengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let prevSegLen := lengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in - let numINF := numInfoFields(seg1Len, seg2Len, seg3Len) in - let offset := hopFieldOffset(numINF, 0) in - let consDir := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in path.ConsDir(raw, currINFIdx) in - let currAsidSeq := asidForCurrSeg(dp, raw, numINF, currHFIdx, prevSegLen+segLen, prevSegLen, consDir, dp.Asid()) in - currAsidSeq == none[seq[io.IO_as]] ? 
none[io.IO_pkt2] : - let last := get(currAsidSeq)[segLen-1] in - let first := get(currAsidSeq)[0] in - let leftAsidSeq := asidsForLeftSeg(dp, raw, numINF, currINFIdx + 1, seg1Len, seg2Len, seg3Len, last) in - let rightAsidSeq := asidsForRightSeg(dp, raw, numINF, currINFIdx - 1, seg1Len, seg2Len, seg3Len, first) in - (leftAsidSeq == none[seq[io.IO_as]] || rightAsidSeq == none[seq[io.IO_as]]) ? none[io.IO_pkt2] : - let midAsid := ((currINFIdx == 0 && seg2Len > 0 && seg3Len > 0) ? some(get(leftAsidSeq)[len(get(leftAsidSeq))-1]) : - (currINFIdx == 2 && seg2Len > 0) ? some(get(rightAsidSeq)[0]) : none[io.IO_as]) in - let midAsidSeq := asidsForMidSeg(dp, raw, numINF, currINFIdx + 2, seg1Len, seg2Len, seg3Len, midAsid) in - midAsidSeq == none[seq[io.IO_as]] ? none[io.IO_pkt2] : - some(io.IO_pkt2(io.IO_Packet2{ - CurrSeg : currSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, get(currAsidSeq)), - LeftSeg : leftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, get(leftAsidSeq)), - MidSeg : midSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, get(midAsidSeq)), - RightSeg : rightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, get(rightAsidSeq)), - })) -} - - -ghost -requires 0 <= offset -requires path.InfoFieldOffset(offset) + 8 < len(raw) -requires acc(&raw[path.InfoFieldOffset(offset) + 4], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 5], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 6], _) -requires acc(&raw[path.InfoFieldOffset(offset) + 7], _) -decreases -pure func timestamp(raw []byte, offset int) io.IO_ainfo { - return let idx := path.InfoFieldOffset(offset) + 4 in - io.IO_ainfo(binary.BigEndian.Uint32(raw[idx : idx + 4])) + let segLen := scion.LengthOfCurrSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let prevSegLen := scion.LengthOfPrevSeg(currHFIdx, seg1Len, seg2Len, seg3Len) in + let numINF := scion.NumInfoFields(seg1Len, seg2Len, seg3Len) in + let offset := scion.HopFieldOffset(numINF, 0, headerOffset) in + io.IO_pkt2(io.IO_Packet2{ + CurrSeg : scion.CurrSeg(raw, offset+prevSegLen, currINFIdx, currHFIdx-prevSegLen, segLen, headerOffset), + LeftSeg : scion.LeftSeg(raw, currINFIdx + 1, seg1Len, seg2Len , seg3Len, headerOffset), + MidSeg : scion.MidSeg(raw, currINFIdx + 2, seg1Len, seg2Len , seg3Len, headerOffset), + RightSeg : scion.RightSeg(raw, currINFIdx - 1, seg1Len, seg2Len , seg3Len, headerOffset), + }) } ghost @@ -535,66 +76,47 @@ requires forall idx int :: {hopfields[idx]} 0 <= idx && idx < len(hopfields) ==> len(hopfields[idx].HVF.MsgTerm_Hash_.MsgTerm_MPair_2.MsgTerm_L_) > 0 decreases pure func uInfo(hopfields seq[io.IO_HF], currHFIdx int, consDir bool) set[io.IO_msgterm] { - return currHFIdx == len(hopfields) ? - hvfSet(hopfields[currHFIdx-1]) : - (currHFIdx == 0 ? + return currHFIdx + 1 >= len(hopfields) ? + hvfSet(hopfields[len(hopfields)-1]) : + (consDir ? hvfSet(hopfields[currHFIdx]) : - (consDir ? - hvfSet(hopfields[currHFIdx]) : - hvfSet(hopfields[currHFIdx-1]))) + hvfSet(hopfields[currHFIdx+1])) } ghost -decreases -pure func ifsToIO_ifs(ifs uint16) option[io.IO_ifs]{ - return ifs == 0 ? 
none[io.IO_ifs] : some(io.IO_ifs(ifs)) -} - -ghost -opaque -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) -decreases -pure func validPktMetaHdr(raw []byte) bool { - return len(raw) > 4 && - unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) in - let hdr := binary.BigEndian.Uint32(raw[0:4]) in - let metaHdr := scion.DecodedFrom(hdr) in - let seg1 := int(metaHdr.SegLen[0]) in - let seg2 := int(metaHdr.SegLen[1]) in - let seg3 := int(metaHdr.SegLen[2]) in - let base := scion.Base{metaHdr, - numInfoFields(seg1, seg2, seg3), - seg1 + seg2 + seg3} in - metaHdr.InBounds() && - 0 < metaHdr.SegLen[0] && - base.ValidCurrInfSpec() && - base.ValidCurrHfSpec() && - len(raw) >= pktLen(seg1, seg2, seg3) -} - -ghost -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Unsupported -ensures val.IO_val_Unsupported_1 == ifsToIO_ifs(ingressID) +ensures val.IO_val_Unsupported_1 == path.ifsToIO_ifs(ingressID) decreases pure func absIO_val_Unsupported(raw []byte, ingressID uint16) (val io.IO_val) { return io.IO_val(io.IO_val_Unsupported{ - ifsToIO_ifs(ingressID), + path.ifsToIO_ifs(ingressID), io.Unit(io.Unit_{}), }) } ghost +opaque requires dp.Valid() -requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), _) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) ensures val.isIO_val_Pkt2 || val.isIO_val_Unsupported decreases pure func absIO_val(dp io.DataPlaneSpec, raw []byte, ingressID uint16) (val io.IO_val) { - return (reveal validPktMetaHdr(raw) && absPkt(dp, raw, dp.asid()) != none[io.IO_pkt2]) ? - io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw, dp.asid()))}) : + return (reveal slayers.ValidPktMetaHdr(raw) && slayers.IsSupportedPkt(raw)) ? + io.IO_val(io.IO_val_Pkt2{path.ifsToIO_ifs(ingressID), absPkt(dp, raw)}) : absIO_val_Unsupported(raw, ingressID) } +ghost +requires dp.Valid() +requires respr.OutPkt != nil ==> + acc(sl.AbsSlice_Bytes(respr.OutPkt, 0, len(respr.OutPkt)), R56) +decreases +pure func absReturnErr(dp io.DataPlaneSpec, respr processResult) (val io.IO_val) { + return respr.OutPkt == nil ? 
io.IO_val_Unit{} :
+		absIO_val(dp, respr.OutPkt, respr.EgressID)
+}
+
 ghost
 requires acc(&d.localIA, _)
 decreases
@@ -625,7 +147,8 @@ ghost
 requires acc(&d.linkTypes, _) && (d.linkTypes != nil ==> acc(d.linkTypes, _))
 decreases
 pure func (d *DataPlane) dpSpecWellConfiguredLinkTypes(dp io.DataPlaneSpec) bool {
-	return forall ifs uint16 :: {ifs in domain(d.linkTypes)} ifs in domain(d.linkTypes) ==>
+	return !(0 in domain(d.linkTypes)) &&
+		forall ifs uint16 :: {ifs in domain(d.linkTypes)} ifs in domain(d.linkTypes) ==>
 		io.IO_ifs(ifs) in domain(dp.GetLinkTypes()) &&
 		absLinktype(d.linkTypes[ifs]) == dp.GetLinkType(io.IO_ifs(ifs))
 }
@@ -640,3 +163,32 @@ pure func (d *DataPlane) DpAgreesWithSpec(dp io.DataPlaneSpec) bool {
 	d.dpSpecWellConfiguredNeighborIAs(dp) &&
 	d.dpSpecWellConfiguredLinkTypes(dp)
 }
+
+ghost
+requires acc(d.Mem(), _)
+requires d.DpAgreesWithSpec(dp)
+ensures acc(&d.linkTypes, _) && (d.linkTypes != nil ==> acc(d.linkTypes, _))
+ensures d.dpSpecWellConfiguredLinkTypes(dp)
+decreases
+func (d *DataPlane) LinkTypesLemma(dp io.DataPlaneSpec) {
+	reveal d.DpAgreesWithSpec(dp)
+	unfold acc(d.Mem(), _)
+}
+
+ghost
+requires dp.Valid()
+requires acc(msg.Mem(), R50)
+decreases
+pure func MsgToAbsVal(dp io.DataPlaneSpec, msg *ipv4.Message, ingressID uint16) (res io.IO_val) {
+	return unfolding acc(msg.Mem(), R50) in
+		absIO_val(dp, msg.Buffers[0], ingressID)
+}
+
+// This assumption will be dropped as soon as we can establish that the contents
+// of the underlying buffer did not change between the call to `decodeLayers` and
+// `p.processSCION` in the function `processPkt` in the router.
+ghost
+ensures absPkt.isIO_val_Pkt2 ==>
+	ElemWitness(ioSharedArg.IBufY, path.ifsToIO_ifs(ingressID), absPkt.IO_val_Pkt2_2)
+decreases
+func TemporaryAssumeForIOWitness(absPkt io.IO_val, ingressID uint16, ioSharedArg SharedArg)
\ No newline at end of file
diff --git a/router/widen-lemma.gobra b/router/widen-lemma.gobra
new file mode 100644
index 000000000..61580235e
--- /dev/null
+++ b/router/widen-lemma.gobra
@@ -0,0 +1,972 @@
+// Copyright 2022 ETH Zurich
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +gobra
+
+package router
+
+import (
+	sl "verification/utils/slices"
+	"verification/io"
+	. "verification/utils/definitions"
+	"verification/dependencies/encoding/binary"
+	"github.com/scionproto/scion/pkg/slayers/path"
+	"github.com/scionproto/scion/pkg/slayers/path/scion"
+)
+
+// Some things in this file could be simplified. Nonetheless, the important definition here
+// is absIO_valWidenLemma. Everything else can be seen as an implementation detail.
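+//
+// In short (a restatement of the contract below, for orientation only):
+// absIO_valWidenLemma states that if the prefix raw[:length] already abstracts
+// to a packet, then abstracting the full buffer yields the same value:
+//
+//	absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==>
+//		absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID)
+//
+// Intuitively, this holds because every byte inspected by absPkt lies within
+// the first `length` bytes, so widening the slice cannot change the result.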
+// TODO: prove Lemma +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) +preserves dp.Valid() +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R49) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R49) +ensures absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) +decreases +func absIO_valWidenLemma(dp io.DataPlaneSpec, raw []byte, ingressID uint16, length int) +/* { + var ret1 io.IO_val + var ret2 io.IO_val + + if (validPktMetaHdr(raw[:length]) && absPkt(dp, raw[:length]) != none[io.IO_pkt2]) { + validPktMetaHdrWidenLemma(raw, length) + assert validPktMetaHdr(raw) + absPktWidenLemma(dp, raw, length) + assert absPkt(dp, raw) != none[io.IO_pkt2] + + ret1 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw))}) + ret2 = io.IO_val(io.IO_val_Pkt2{ifsToIO_ifs(ingressID), get(absPkt(dp, raw[:length]))}) + assert ret1 == reveal absIO_val(dp, raw, ingressID) + assert ret2 == reveal absIO_val(dp, raw[:length], ingressID) + assert ret1 == ret2 + assert absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2 ==> + absIO_val(dp, raw[:length], ingressID) == absIO_val(dp, raw, ingressID) + } else { + assert !(reveal absIO_val(dp, raw[:length], ingressID).isIO_val_Pkt2) + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +requires validPktMetaHdr(raw[:length]) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures validPktMetaHdr(raw) +decreases +func validPktMetaHdrWidenLemma(raw []byte, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + reveal validPktMetaHdr(raw[:length]) + ret1 := reveal validPktMetaHdr(raw) + ret2 := reveal validPktMetaHdr(raw[:length]) + assert ret1 == ret2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) +requires validPktMetaHdr(raw) +requires validPktMetaHdr(raw[:length]) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R50) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R50) +ensures validPktMetaHdr(raw) +ensures validPktMetaHdr(raw[:length]) +ensures absPkt(dp, raw) == absPkt(dp, raw[:length]) +decreases +func absPktWidenLemma(dp io.DataPlaneSpec, raw []byte, length int) { + + // declarations + var last1 io.IO_as + var last2 io.IO_as + var first1 io.IO_as + var first2 io.IO_as + var leftAsidSeq1 option[seq[io.IO_as]] + var leftAsidSeq2 option[seq[io.IO_as]] + var rightAsidSeq1 option[seq[io.IO_as]] + var rightAsidSeq2 option[seq[io.IO_as]] + var midAsidSeq1 option[seq[io.IO_as]] + var midAsidSeq2 option[seq[io.IO_as]] + var midAsid1 option[io.IO_as] + var midAsid2 option[io.IO_as] + var ret1 option[io.IO_pkt2] + var ret2 option[io.IO_pkt2] + var lm bool + + // abspkt step by step + _ := reveal validPktMetaHdr(raw) + _ := reveal validPktMetaHdr(raw[:length]) + hdr1 := unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in 
binary.BigEndian.Uint32(raw[0:4]) + hdr2 := unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) + assert unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) == unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) in binary.BigEndian.Uint32(raw[:length][0:4]) + assert hdr1 == hdr2 + + metaHdr1 := scion.DecodedFrom(hdr1) + metaHdr2 := scion.DecodedFrom(hdr2) + assert metaHdr1 == metaHdr2 + + currINFIdx1 := int(metaHdr1.CurrINF) + currINFIdx2 := int(metaHdr2.CurrINF) + assert currINFIdx1 == currINFIdx2 + + currHFIdx1 := int(metaHdr1.CurrHF) + currHFIdx2 := int(metaHdr2.CurrHF) + assert currHFIdx1 == currHFIdx2 + + seg1Len1 := int(metaHdr1.SegLen[0]) + seg1Len2 := int(metaHdr2.SegLen[0]) + assert seg1Len1 == seg1Len2 + + seg2Len1 := int(metaHdr1.SegLen[1]) + seg2Len2 := int(metaHdr2.SegLen[1]) + assert seg2Len1 == seg2Len2 + + seg3Len1 := int(metaHdr1.SegLen[2]) + seg3Len2 := int(metaHdr2.SegLen[2]) + assert seg3Len1 == seg3Len2 + + segLen1 := lengthOfCurrSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + segLen2 := lengthOfCurrSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + assert segLen1 == segLen2 + + prevSegLen1 := lengthOfPrevSeg(currHFIdx1, seg1Len1, seg2Len1, seg3Len1) + prevSegLen2 := lengthOfPrevSeg(currHFIdx2, seg1Len2, seg2Len2, seg3Len2) + assert prevSegLen1 == prevSegLen2 + + numINF1 := numInfoFields(seg1Len1, seg2Len1, seg3Len1) + numINF2 := numInfoFields(seg1Len2, seg2Len2, seg3Len2) + assert numINF1 == numINF2 + + offset1 := hopFieldOffset(numINF1, 0) + offset2 := hopFieldOffset(numINF2, 0) + assert offset1 == offset2 + + consDir1 := path.ConsDir(raw, currINFIdx1) + consDir2 := path.ConsDir(raw[:length], currINFIdx2) + consDirWidenLemma(raw, length, currINFIdx1) + assert consDir1 == consDir2 + + asidForCurrSegWidenLemma(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid(), length) + currAsidSeq2 := asidForCurrSeg(dp, raw, numINF1, currHFIdx1, prevSegLen1+segLen1, prevSegLen1, consDir1, dp.Asid()) + currAsidSeq1 := asidForCurrSeg(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2+segLen2, prevSegLen2, consDir2, dp.Asid()) + assert currAsidSeq1 == currAsidSeq2 + + if (currAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == reveal absPkt(dp, raw[:length]) + } else { + + last1 = get(currAsidSeq1)[segLen1-1] + last2 = get(currAsidSeq2)[segLen1-1] + assert last1 == last2 + + first1 = get(currAsidSeq1)[0] + first2 = get(currAsidSeq2)[0] + assert first1 == first2 + + asidsForLeftSegWidenLemma(dp, raw, numINF1, currINFIdx1+1, seg1Len1, seg2Len1, seg3Len1, last1, length) + leftAsidSeq1 = asidsForLeftSeg(dp, raw, numINF1, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, last1) + leftAsidSeq2 = asidsForLeftSeg(dp, raw[:length], numINF2, currINFIdx2 + 1, seg1Len2, seg2Len2, seg3Len2, last2) + assert leftAsidSeq1 == leftAsidSeq2 + + asidsForRightSegWidenLemma(dp, raw, numINF1, currINFIdx1-1, seg1Len1, seg2Len1, seg3Len1, first1, length) + rightAsidSeq1 = asidsForRightSeg(dp, raw, numINF1, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, first1) + rightAsidSeq2 = asidsForRightSeg(dp, raw[:length], numINF2, currINFIdx2 - 1, seg1Len2, seg2Len2, seg3Len2, first2) + assert rightAsidSeq1 == rightAsidSeq2 + + if (leftAsidSeq1 == none[seq[io.IO_as]] || rightAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == 
reveal absPkt(dp, raw[:length]) + } else { + assert leftAsidSeq2 != none[seq[io.IO_as]] && rightAsidSeq2 != none[seq[io.IO_as]] + + midAsid1 = ((currINFIdx1 == 0 && seg2Len1 > 0 && seg3Len1 > 0) ? some(get(leftAsidSeq1)[len(get(leftAsidSeq1))-1]) : (currINFIdx1 == 2 && seg2Len1 > 0) ? some(get(rightAsidSeq1)[0]) : none[io.IO_as]) + midAsid2 = ((currINFIdx2 == 0 && seg2Len2 > 0 && seg3Len2 > 0) ? some(get(leftAsidSeq2)[len(get(leftAsidSeq2))-1]) : (currINFIdx2 == 2 && seg2Len2 > 0) ? some(get(rightAsidSeq2)[0]) : none[io.IO_as]) + assert midAsid1 == midAsid2 + + asidsForMidSegWidenLemma(dp, raw, numINF1, currINFIdx1+2, seg1Len1, seg2Len1, seg3Len1, midAsid1, length) + midAsidSeq1 = asidsForMidSeg(dp, raw, numINF1, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, midAsid1) + midAsidSeq2 = asidsForMidSeg(dp, raw[:length], numINF2, currINFIdx2 + 2, seg1Len2, seg2Len2, seg3Len2, midAsid2) + assert midAsidSeq1 == midAsidSeq2 + if (midAsidSeq1 == none[seq[io.IO_as]]) { + ret := none[io.IO_pkt2] + assert ret == reveal absPkt(dp, raw) + assert ret == reveal absPkt(dp, raw[:length]) + } else { + currSegWidenLemma(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1), length) + leftSegWidenLemma(raw, currINFIdx1 + 1, seg1Len1, seg2Len1, seg3Len1, get(leftAsidSeq1), length) + midSegWidenLemma(raw, currINFIdx1 + 2, seg1Len1, seg2Len1, seg3Len1, get(midAsidSeq1), length) + rightSegWidenLemma(raw, currINFIdx1 - 1, seg1Len1, seg2Len1, seg3Len1, get(rightAsidSeq1), length) + ret1 = some(io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg(raw, offset1+prevSegLen1, currINFIdx1, currHFIdx1-prevSegLen1, get(currAsidSeq1)), + LeftSeg : leftSeg(raw, currINFIdx1 + 1, seg1Len1, seg2Len1 , seg3Len1, get(leftAsidSeq1)), + MidSeg : midSeg(raw, currINFIdx1 + 2, seg1Len1, seg2Len1 , seg3Len1, get(midAsidSeq1)), + RightSeg : rightSeg(raw, currINFIdx1 - 1, seg1Len1, seg2Len1 , seg3Len1, get(rightAsidSeq1)), + })) + ret2 = some(io.IO_pkt2(io.IO_Packet2{ + CurrSeg : currSeg(raw[:length], offset2+prevSegLen2, currINFIdx2, currHFIdx2-prevSegLen2, get(currAsidSeq2)), + LeftSeg : leftSeg(raw[:length], currINFIdx2 + 1, seg1Len2, seg2Len2 , seg3Len2, get(leftAsidSeq2)), + MidSeg : midSeg(raw[:length], currINFIdx2 + 2, seg1Len2, seg2Len2 , seg3Len2, get(midAsidSeq2)), + RightSeg : rightSeg(raw[:length], currINFIdx2 - 1, seg1Len2, seg2Len2 , seg3Len2, get(rightAsidSeq2)), + })) + reveal absPkt(dp, raw) + reveal absPkt(dp, raw[:length]) + assert ret1 == absPkt(dp, raw) + assert ret2 == absPkt(dp, raw[:length]) + assert ret1 == ret2 + } + } + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +requires 0 <= currINFIdx +requires path.InfoFieldOffset(currINFIdx) < length +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +ensures path.ConsDir(raw, currINFIdx) == path.ConsDir(raw[:length], currINFIdx) +decreases +func consDirWidenLemma(raw []byte, length int, currINFIdx int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) + assert &raw[path.InfoFieldOffset(currINFIdx)] == &raw[:length][path.InfoFieldOffset(currINFIdx)] + assert raw[path.InfoFieldOffset(currINFIdx)] == raw[:length][path.InfoFieldOffset(currINFIdx)] + assert (raw[path.InfoFieldOffset(currINFIdx)] & 0x1 == 0x1) == (raw[:length][path.InfoFieldOffset(currINFIdx)] & 
0x1 == 0x1) + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56) +} + +ghost +requires 0 <= length && length <= len(raw) +requires 1 <= numINF1 +requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 +requires currHFIdx1 < segLen1 +requires hopFieldOffset(numINF1, segLen1) <= length +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) == + asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) +decreases +func asidForCurrSegWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + currHFIdx1 int, + segLen1 int, + prevSegLen1 int, + consDir1 bool, + asid1 io.IO_as, + length int) { + + var ret1 option[seq[io.IO_as]] + var ret2 option[seq[io.IO_as]] + var left1 option[seq[io.IO_as]] + var left2 option[seq[io.IO_as]] + var right1 option[seq[io.IO_as]] + var right2 option[seq[io.IO_as]] + + + if (segLen1 == 0) { + assert segLen1 == 0 + ret1 = some(seq[io.IO_as]{}) + ret2 = some(seq[io.IO_as]{}) + assert ret1 == asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } else { + asidsBeforeWidenLemma(dp, raw, numINF1, numINF1, currHFIdx1, currHFIdx1, prevSegLen1, prevSegLen1, consDir1, consDir1, asid1, asid1, length) + left1 = asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + left2 = asidsBefore(dp, raw[:length], numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert left1 == left2 + newP := (R52 + R53)/2 + asidsAfterWidenLemma(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1, length, newP) + right1 = asidsAfter(dp, raw, numINF1, currHFIdx1, segLen1, consDir1, asid1) + right2 = asidsAfter(dp, raw[:length], numINF1, currHFIdx1, segLen1, consDir1, asid1) + assert right1 == right2 + if (left1 == none[seq[io.IO_as]] || right1 == none[seq[io.IO_as]]) { + assert (left2 == none[seq[io.IO_as]] || right2 == none[seq[io.IO_as]]) + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } else { + assert (left2 != none[seq[io.IO_as]] && right2 != none[seq[io.IO_as]]) + ret1 = some(get(left1) ++ get(right1)[1:]) + ret2 = some(get(left2) ++ get(right2)[1:]) + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 + } + } + assert ret1 == reveal asidForCurrSeg(dp, raw, numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret2 == reveal asidForCurrSeg(dp, raw[:length], numINF1, currHFIdx1, segLen1, prevSegLen1, consDir1, asid1) + assert ret1 == ret2 +} + +ghost +requires 1 <= numINF1 +requires 0 <= prevSegLen1 && prevSegLen1 <= currHFIdx1 +requires length <= len(raw) +requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length 
+requires dp.Valid() +requires consDir1 == consDir2 +requires prevSegLen1 == prevSegLen2 +requires currHFIdx1 == currHFIdx2 +requires numINF1 == numINF2 +requires asid1 == asid2 +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +ensures forall i int :: { &raw[i] } 0 <= i && i < len(raw) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) == (unfolding acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) in raw[i]) +ensures forall i int :: { &raw[:length][i] } 0 <= i && i < len(raw[:length]) ==> old(unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) == (unfolding acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) in raw[:length][i]) +ensures asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) == + asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) +decreases currHFIdx1 - prevSegLen1 +func asidsBeforeWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + numINF2 int, + currHFIdx1 int, + currHFIdx2 int, + prevSegLen1 int, + prevSegLen2 int, + consDir1 bool, + consDir2 bool, + asid1 io.IO_as, + asid2 io.IO_as, + length int) { + + var ret1 option[seq[io.IO_as]] + var ret2 option[seq[io.IO_as]] + var nextAsid1 option[io.IO_as] + var nextAsid2 option[io.IO_as] + var nextAsidSeq1 option[seq[io.IO_as]] + var nextAsidSeq2 option[seq[io.IO_as]] + + if (currHFIdx1 == prevSegLen1) { + assert currHFIdx2 == prevSegLen2 + ret1 = some(seq[io.IO_as]{asid1}) + ret2 = some(seq[io.IO_as]{asid2}) + assert ret1 == ret2 + } else { + assert currHFIdx2 != prevSegLen2 + nextAsid1 = asidFromIfs(dp, raw, numINF1, currHFIdx1, !consDir1, asid1) + nextAsid2 = asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, !consDir2, asid2) + asidFromIfsWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1, currHFIdx2, !consDir1, !consDir2, asid1, asid2, length) + assert nextAsid1 == nextAsid2 + if (nextAsid1 == none[io.IO_as]) { + assert nextAsid2 == none[io.IO_as] + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } else { + assert nextAsid2 != none[io.IO_as] + asidsBeforeWidenLemma(dp, raw, numINF1, numINF2, currHFIdx1-1, currHFIdx2-1, prevSegLen1, prevSegLen2, consDir1, consDir2, get(nextAsid1), get(nextAsid2), length) + nextAsidSeq1 = asidsBefore(dp, raw, numINF1, currHFIdx1-1, prevSegLen1, consDir1, get(nextAsid1)) + nextAsidSeq2 = asidsBefore(dp, raw[:length], numINF2, currHFIdx2-1, prevSegLen2, consDir2, get(nextAsid2)) + assert nextAsidSeq1 == nextAsidSeq2 + if (nextAsidSeq1 == none[seq[io.IO_as]]) { + assert nextAsidSeq2 == none[seq[io.IO_as]] + ret1 = none[seq[io.IO_as]] + ret2 = none[seq[io.IO_as]] + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } else { + ret1 = some(get(nextAsidSeq1) ++ seq[io.IO_as]{asid1}) + ret2 = some(get(nextAsidSeq2) ++ seq[io.IO_as]{asid2}) + assert ret1 == ret2 + assert ret1 == asidsBefore(dp, raw, numINF1, currHFIdx1, prevSegLen1, consDir1, asid1) + assert ret2 == asidsBefore(dp, 
raw[:length], numINF2, currHFIdx2, prevSegLen2, consDir2, asid2) + } + } + } +} + +ghost +requires 1 <= numINF1 +requires 0 <= currHFIdx1 +requires numINF1 == numINF2 +requires currHFIdx1 == currHFIdx2 +requires consDir1 == consDir2 +requires asid1 == asid2 +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF1, currHFIdx1) + path.HopLen <= length +requires dp.Valid() +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) == + asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) +decreases +func asidFromIfsWidenLemma( + dp io.DataPlaneSpec, + raw []byte, + numINF1 int, + numINF2 int, + currHFIdx1 int, + currHFIdx2 int, + consDir1 bool, + consDir2 bool, + asid1 io.IO_as, + asid2 io.IO_as, + length int) { + var ret1 option[io.IO_as] + var ret2 option[io.IO_as] + + idx1 := hopFieldOffset(numINF1, currHFIdx1) + idx2 := hopFieldOffset(numINF2, currHFIdx2) + assert idx1 == idx2 + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + assert forall i int :: { &raw[idx1+2+i] } { &raw[idx1+2:idx1+4][i] } 0 <= i && i < 2 ==> + &raw[idx1+2+i] == &raw[idx1+2:idx1+4][i] + assert forall i int :: { &raw[:length][idx2+2+i] } { &raw[:length][idx2+2:idx2+4][i] } 0 <= i && i < 2 ==> + &raw[:length][idx2+2+i] == &raw[:length][idx2+2:idx2+4][i] + assert forall i int :: { &raw[idx1+4+i] } { &raw[idx1+4:idx1+6][i] } 0 <= i && i < 2 ==> + &raw[idx1+4+i] == &raw[idx1+4:idx1+6][i] + assert forall i int :: { &raw[:length][idx2+4+i] } { &raw[:length][idx2+4:idx2+6][i] } 0 <= i && i < 2 ==> + &raw[:length][idx2+4+i] == &raw[:length][idx2+4:idx2+6][i] + ifs1 := consDir1 ? binary.BigEndian.Uint16(raw[idx1+4:idx1+6]) : binary.BigEndian.Uint16(raw[idx1+2:idx1+4]) + ifs2 := consDir2 ?
binary.BigEndian.Uint16(raw[:length][idx2+4:idx2+6]) : binary.BigEndian.Uint16(raw[:length][idx2+2:idx2+4]) + assert ifs1 == ifs2 + asIfPair1 := io.AsIfsPair{asid1, io.IO_ifs(ifs1)} + asIfPair2 := io.AsIfsPair{asid2, io.IO_ifs(ifs2)} + assert asIfPair1 == asIfPair2 + if (asIfPair1 in domain(dp.GetLinks())) { + assert asIfPair2 in domain(dp.GetLinks()) + ret1 = some(dp.Lookup(asIfPair1).asid) + ret2 = some(dp.Lookup(asIfPair2).asid) + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) + } else { + assert !(asIfPair2 in domain(dp.GetLinks())) + ret1 = none[io.IO_as] + ret2 = none[io.IO_as] + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) + } + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + assert ret1 == ret2 + assert ret1 == asidFromIfs(dp, raw, numINF1, currHFIdx1, consDir1, asid1) + assert ret2 == asidFromIfs(dp, raw[:length], numINF2, currHFIdx2, consDir2, asid2) +} + +// --- The file has been simplified past this point + +ghost +requires R53 < p +requires 1 <= numINF +requires 0 <= currHFIdx && currHFIdx < segLen +requires length <= len(raw) +requires hopFieldOffset(numINF, segLen) <= length +requires dp.Valid() +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) == + asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) +decreases segLen - currHFIdx + 1 +func asidsAfterWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currHFIdx int, segLen int, consDir bool, asid io.IO_as, length int, p perm) { + if currHFIdx != segLen - 1 { + nextAsid1 := asidFromIfs(dp, raw, numINF, currHFIdx, consDir, asid) + nextAsid2 := asidFromIfs(dp, raw[:length], numINF, currHFIdx, consDir, asid) + asidFromIfsWidenLemma(dp, raw, numINF, numINF, currHFIdx, currHFIdx, consDir, consDir, asid, asid, length) + assert nextAsid1 == nextAsid2 + if nextAsid1 == none[io.IO_as] { + ret := none[seq[io.IO_as]] + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } else { + newP := (p + R53)/2 + asidsAfterWidenLemma(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1), length, newP) + nextAsidSeq1 := asidsAfter(dp, raw, numINF, currHFIdx+1, segLen, consDir, get(nextAsid1)) + nextAsidSeq2 := asidsAfter(dp, raw[:length], numINF, currHFIdx+1, segLen, consDir, get(nextAsid2)) + assert nextAsidSeq1 == nextAsidSeq2 + if nextAsidSeq1 == none[seq[io.IO_as]] { + ret := none[seq[io.IO_as]] + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } else { + ret := some(seq[io.IO_as]{asid} ++ get(nextAsidSeq1)) + assert ret == asidsAfter(dp, raw, numINF, currHFIdx, segLen, consDir, asid) + assert ret == asidsAfter(dp, raw[:length], numINF, currHFIdx, segLen, consDir, asid) + } + } + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + 
seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires 1 <= currINFIdx && currINFIdx < 4 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForLeftSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as, length int) { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + + if currINFIdx == 1 && seg2Len > 0 { + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len, seg1Len+seg2Len, seg1Len, consDir2, asid) + assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir2, asid) + assert ret1 == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForLeftSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForLeftSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires -1 <= currINFIdx && currINFIdx < 2 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForRightSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid io.IO_as, length int) { + if currINFIdx == 1 && seg2Len > 0 { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len+seg2Len-1, seg1Len+seg2Len, seg1Len, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len+seg2Len-1,
seg1Len+seg2Len, seg1Len, consDir2, asid) + assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 0 { + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + consDirWidenLemma(raw, length, currINFIdx) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid, length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, asid) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, asid) + assert ret1 == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForRightSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForRightSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires dp.Valid() +requires 1 <= numINF +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires hopFieldOffset(numINF, seg1Len + seg2Len + seg3Len) <= length +requires currINFIdx <= numINF + 1 +requires 2 <= currINFIdx && currINFIdx < 5 +requires (currINFIdx == 4 && seg2Len > 0) ==> asid != none[io.IO_as] +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> asid != none[io.IO_as] +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func asidsForMidSegWidenLemma(dp io.DataPlaneSpec, raw []byte, numINF int, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid option[io.IO_as], length int) { + if currINFIdx == 4 && seg2Len > 0 { + consDir1 := path.ConsDir(raw, 1) + consDir2 := path.ConsDir(raw[:length], 1) + consDirWidenLemma(raw, length, 1) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid), length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len-1, seg1Len, 0, consDir1, get(asid)) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len-1, seg1Len, 0, consDir2, get(asid)) + assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + consDir1 := path.ConsDir(raw, 2) + consDir2 := path.ConsDir(raw[:length], 2) + consDirWidenLemma(raw, length, 2) + assert consDir1 == consDir2 + asidForCurrSegWidenLemma(dp, raw, numINF, seg1Len+seg2Len, seg1Len+seg2Len+seg3Len, seg1Len+seg2Len, consDir1, get(asid), length) + ret1 := asidForCurrSeg(dp, raw, numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir1, get(asid)) + ret2 := asidForCurrSeg(dp, raw[:length], numINF, seg1Len + seg2Len, seg1Len + seg2Len + seg3Len, seg1Len + seg2Len, consDir2, get(asid)) + assert ret1 == reveal asidsForMidSeg(dp, raw, numINF, 
currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := some(seq[io.IO_as]{}) + assert ret == reveal asidsForMidSeg(dp, raw, numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal asidsForMidSeg(dp, raw[:length], numINF, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires path.InfoFieldOffset(currINFIdx) + path.InfoLen <= offset +requires 0 < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires 0 <= currINFIdx && currINFIdx < 3 +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures currSeg(raw, offset, currINFIdx, currHFIdx, asid) == + currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid) +decreases +func currSegWidenLemma(raw []byte, offset int, currINFIdx int, currHFIdx int, asid seq[io.IO_as], length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) + + ainfo1 := path.Timestamp(raw, currINFIdx) + ainfo2 := path.Timestamp(raw[:length], currINFIdx) + assert ainfo1 == ainfo2 + + consDir1 := path.ConsDir(raw, currINFIdx) + consDir2 := path.ConsDir(raw[:length], currINFIdx) + assert consDir1 == consDir2 + + peer1 := path.Peer(raw, currINFIdx) + peer2 := path.Peer(raw[:length], currINFIdx) + assert peer1 == peer2 + + segmentWidenLemma(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1, length) + ret1 := segment(raw, offset, currHFIdx, asid, ainfo1, consDir1, peer1) + ret2 := segment(raw[:length], offset, currHFIdx, asid, ainfo2, consDir2, peer2) + assert ret1 == reveal currSeg(raw, offset, currINFIdx, currHFIdx, asid) + assert ret2 == reveal currSeg(raw[:length], offset, currINFIdx, currHFIdx, asid) + assert ret1 == ret2 + + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R53) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R53) +} + +ghost +requires 0 <= offset +requires 0 < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +requires acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R52) +ensures acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R52) +ensures segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer) +decreases +func segmentWidenLemma(raw []byte, offset int, currHFIdx int, asid seq[io.IO_as], ainfo io.IO_ainfo, consDir bool, peer bool, length int) { + newP := (R52 + R53)/2 + assert R53 < newP && newP < R52 + hopFieldsConsDirWidenLemma(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo, length, newP) + hopFieldsNotConsDirWidenLemma(raw, offset, len(asid)-1, set[io.IO_msgterm]{}, asid, ainfo, length, newP) + hopfields1 := consDir ? hopFieldsConsDir(raw, offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw, offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) + hopfields2 := consDir ? 
hopFieldsConsDir(raw[:length], offset, 0, set[io.IO_msgterm]{}, asid, ainfo) : hopFieldsNotConsDir(raw[:length], offset, len(asid) - 1, set[io.IO_msgterm]{}, asid, ainfo) + assert hopfields1 == hopfields2 + + uinfo := uInfo(hopfields1, currHFIdx, consDir) + + ret1 := io.IO_seg2(io.IO_seg3_{ + AInfo : ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields1, currHFIdx - 1), + Future : segFuture(hopfields1, currHFIdx), + History : segHistory(hopfields1, currHFIdx - 1), + }) + ret2 := io.IO_seg2(io.IO_seg3_{ + AInfo : ainfo, + UInfo : uinfo, + ConsDir : consDir, + Peer : peer, + Past : segPast(hopfields2, currHFIdx - 1), + Future : segFuture(hopfields2, currHFIdx), + History : segHistory(hopfields2, currHFIdx - 1), + }) + assert ret1 == segment(raw, offset, currHFIdx, asid, ainfo, consDir, peer) + assert ret2 == segment(raw[:length], offset, currHFIdx, asid, ainfo, consDir, peer) + assert ret1 == ret2 +} + +ghost +requires R53 < p +requires 0 <= offset +requires 0 <= currHFIdx && currHFIdx <= len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * len(asid) <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) == + hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) +decreases len(asid) - currHFIdx +func hopFieldsConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) { + if currHFIdx == len(asid) { + ret := seq[io.IO_HF]{} + assert ret == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + } else { + hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length) + hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + assert hf1 == hf2 + + newP := (p + R53)/2 + assert R53 < newP && newP < p + hopFieldsConsDirWidenLemma(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP) + ret1 := seq[io.IO_HF]{hf1} ++ hopFieldsConsDir(raw, offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo) + ret2 := seq[io.IO_HF]{hf2} ++ hopFieldsConsDir(raw[:length], offset, currHFIdx + 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo) + assert ret1 == hopFieldsConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret2 == hopFieldsConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + assert ret1 == ret2 + } +} + +ghost +requires 0 <= length && length <= len(raw) +requires idx + path.HopLen <= length +requires 0 <= idx +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R54) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R54) +ensures hopField(raw, idx, beta, asid, ainfo) == hopField(raw[:length], idx, beta, asid, ainfo) +decreases +func hopFieldWidenLemma(raw []byte, idx int, beta set[io.IO_msgterm], asid io.IO_as, ainfo io.IO_ainfo, length int) { + unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) + + assert forall i int :: { &raw[idx+2+i] } { &raw[idx+2:idx+4][i] } 0 <= i && i < 2 ==> &raw[idx+2+i] == &raw[idx+2:idx+4][i] + assert forall i int :: { &raw[idx+4+i] } {
&raw[idx+4:idx+6][i] } 0 <= i && i < 2 ==> &raw[idx+4+i] == &raw[idx+4:idx+6][i] + inif21 := binary.BigEndian.Uint16(raw[idx+2:idx+4]) + inif22 := binary.BigEndian.Uint16(raw[:length][idx+2:idx+4]) + assert inif21 == inif22 + + egif2 := binary.BigEndian.Uint16(raw[idx+4:idx+6]) + op_inif2 := inif21 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(inif21)) + op_egif2 := egif2 == 0 ? none[io.IO_ifs] : some(io.IO_ifs(egif2)) + ts := io.IO_msgterm(io.MsgTerm_Num{ainfo}) + l := io.IO_msgterm(io.MsgTerm_L{seq[io.IO_msgterm]{ts, io.if2term(op_inif2), io.if2term(op_egif2), io.IO_msgterm(io.MsgTerm_FS{beta})}}) + hvf := io.mac(io.macKey(io.asidToKey(asid)), l) + + ret1 := io.IO_HF(io.IO_HF_{ + InIF2 : op_inif2, + EgIF2 : op_egif2, + HVF : hvf, + }) + ret2 := io.IO_HF(io.IO_HF_{ + InIF2 : op_inif2, + EgIF2 : op_egif2, + HVF : hvf, + }) + assert ret1 == hopField(raw, idx, beta, asid, ainfo) + assert ret2 == hopField(raw[:length], idx, beta, asid, ainfo) + assert ret1 == ret2 + fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55) + fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55) +} + +ghost +requires R53 < p +requires 0 <= offset +requires -1 <= currHFIdx && currHFIdx < len(asid) +requires 0 <= length && length <= len(raw) +requires offset + path.HopLen * currHFIdx + path.HopLen <= length +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), p) +ensures hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) == + hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) +decreases currHFIdx + 1 +func hopFieldsNotConsDirWidenLemma(raw []byte, offset int, currHFIdx int, beta set[io.IO_msgterm], asid seq[io.IO_as], ainfo io.IO_ainfo, length int, p perm) { + if currHFIdx == -1 { + ret := seq[io.IO_HF]{} + assert ret == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + } else { + hopFieldWidenLemma(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo, length) + hf1 := hopField(raw, offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + hf2 := hopField(raw[:length], offset + path.HopLen * currHFIdx, beta, asid[currHFIdx], ainfo) + assert hf1 == hf2 + + newP := (p + R53)/2 + assert R53 < newP && newP < p + hopFieldsNotConsDirWidenLemma(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo, length, newP) + ret1 := hopFieldsNotConsDir(raw, offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf1.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf1} + ret2 := hopFieldsNotConsDir(raw[:length], offset, currHFIdx - 1, (beta union set[io.IO_msgterm]{hf2.HVF}), asid, ainfo) ++ seq[io.IO_HF]{hf2} + assert ret1 == hopFieldsNotConsDir(raw, offset, currHFIdx, beta, asid, ainfo) + assert ret2 == hopFieldsNotConsDir(raw[:length], offset, currHFIdx, beta, asid, ainfo) + assert ret1 == ret2 + } +} + +ghost +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len) <= length +requires 1 <= currINFIdx && currINFIdx < 4 +requires (currINFIdx == 1 && seg2Len > 0) ==> len(asid) == seg2Len +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + 
leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func leftSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) { + offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + if currINFIdx == 1 && seg2Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, 0, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, 0, asid)) + assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + assert ret1 == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := none[io.IO_seg3] + assert ret == reveal leftSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal leftSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} + +ghost +requires 0 < seg1Len +requires 0 <= seg2Len +requires 0 <= seg3Len +requires 0 <= length && length <= len(raw) +requires pktLen(seg1Len, seg2Len, seg3Len) <= length +requires -1 <= currINFIdx && currINFIdx < 2 +requires (currINFIdx == 1 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg2Len +requires (currINFIdx == 0 && seg2Len > 0) ==> len(asid) == seg1Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func rightSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) { + offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + if currINFIdx == 1 && seg2Len > 0 && seg3Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * seg1Len, currINFIdx, seg2Len, asid)) + assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 0 && seg2Len > 0 { + currSegWidenLemma(raw, offset, currINFIdx, seg1Len, asid, length) + ret1 := some(currSeg(raw, offset, currINFIdx, seg1Len, asid)) + ret2 := some(currSeg(raw[:length], offset, currINFIdx, seg1Len, asid)) + assert ret1 == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := none[io.IO_seg3] + assert ret == reveal rightSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal rightSeg(raw[:length], currINFIdx, seg1Len, seg2Len, 
seg3Len, asid) + } +} + +ghost +requires 0 <= seg2Len +requires 0 < seg1Len +requires 0 <= length && length <= len(raw) +requires 0 <= seg3Len +requires 2 <= currINFIdx && currINFIdx < 5 +requires pktLen(seg1Len, seg2Len, seg3Len) <= length +requires (currINFIdx == 2 && seg2Len > 0 && seg3Len > 0) ==> len(asid) == seg3Len +requires (currINFIdx == 4 && seg2Len > 0) ==> len(asid) == seg1Len +preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R51) +preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R51) +ensures midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) == + midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) +decreases +func midSegWidenLemma(raw []byte, currINFIdx int, seg1Len int, seg2Len int, seg3Len int, asid seq[io.IO_as], length int) { + offset := hopFieldOffset(numInfoFields(seg1Len, seg2Len, seg3Len), 0) + if currINFIdx == 4 && seg2Len > 0 { + currSegWidenLemma(raw, offset, 0, seg1Len, asid, length) + ret1 := some(currSeg(raw, offset, 0, seg1Len, asid)) + ret2 := some(currSeg(raw[:length], offset, 0, seg1Len, asid)) + assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else if currINFIdx == 2 && seg2Len > 0 && seg3Len > 0 { + currSegWidenLemma(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid, length) + ret1 := some(currSeg(raw, offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + ret2 := some(currSeg(raw[:length], offset + path.HopLen * (seg1Len + seg2Len), currINFIdx, 0, asid)) + assert ret1 == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret2 == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret1 == ret2 + } else { + ret := none[io.IO_seg3] + assert ret == reveal midSeg(raw, currINFIdx, seg1Len, seg2Len, seg3Len, asid) + assert ret == reveal midSeg(raw[:length], currINFIdx, seg1Len, seg2Len, seg3Len, asid) + } +} +*/ \ No newline at end of file diff --git a/verification/io/router.gobra b/verification/io/router.gobra index d34d2de3a..0bb6d82b8 100644 --- a/verification/io/router.gobra +++ b/verification/io/router.gobra @@ -72,7 +72,7 @@ pure func asidToKey(asid IO_as) IO_key{ ghost decreases -pure func upd_uinfo(segid set[IO_msgterm], hf IO_HF) set[IO_msgterm]{ +pure func upd_uinfo(segid set[IO_msgterm], hf IO_HF) set[IO_msgterm] { return let setHVF := set[IO_msgterm]{hf.HVF} in (segid union setHVF) setminus (segid intersection setHVF) } @@ -129,7 +129,6 @@ pure func (dp DataPlaneSpec) dp3s_forward_ext(m IO_pkt3, newpkt IO_pkt3, nextif let hf1, fut := currseg.Future[0], currseg.Future[1:] in let traversedseg := newpkt.CurrSeg in dp.dp2_forward_ext_guard(dp.Asid(), m, nextif, currseg, traversedseg, newpkt, fut, hf1) && - dp.dp2_check_interface_top(currseg.ConsDir, dp.Asid(), hf1) && (nextif in domain(dp.GetNeighborIAs())) && let a2 := dp.GetNeighborIA(nextif) in let i2 := dp.Lookup(AsIfsPair{dp.Asid(), nextif}).ifs in diff --git a/verification/io/router_events.gobra b/verification/io/router_events.gobra index 51c4dc33a..f20449359 100644 --- a/verification/io/router_events.gobra +++ b/verification/io/router_events.gobra @@ -91,6 +91,7 @@ pure func (dp DataPlaneSpec) dp2_enter_guard(m IO_pkt2, currseg IO_seg2, travers return m.CurrSeg == currseg && currseg.Future == seq[IO_HF]{hf1} ++ fut && dp.dp2_check_interface(currseg.ConsDir, asid, hf1, recvif) && + (dp.dp2_check_interface_top(currseg.ConsDir, asid, 
hf1) || fut == seq[IO_HF]{}) && update_uinfo(!currseg.ConsDir, currseg, traversedseg, hf1) && same_segment2(currseg, traversedseg) && same_other2(currseg, traversedseg) && diff --git a/verification/io/xover.gobra b/verification/io/xover.gobra index 206d53d55..eff09c6bf 100644 --- a/verification/io/xover.gobra +++ b/verification/io/xover.gobra @@ -103,3 +103,5 @@ decreases pure func (dp DataPlaneSpec) xover_up2down2_link_type_dir(asid IO_as, d1 bool, hf1 IO_HF, d2 bool, hf2 IO_HF) bool { return dp.xover_up2down2_link_type(asid, swap_if_dir2(hf1, d1), swap_if_dir2(hf2, d2)) } + +
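
// --- Note appended for readers of this simplified diff: every *WidenLemma above
// follows the same pattern. A pure ghost function evaluated on raw is shown to agree
// with the same function evaluated on the prefix raw[:length], provided every byte
// offset the function inspects lies below length. Each proof unfolds the
// AbsSlice_Bytes predicates of both views, asserts the slice-aliasing facts needed
// to identify the underlying bytes, and then replays the function's case split.
// The sketch below distills that pattern into its smallest instance; the name
// byteWidenLemma is hypothetical and not part of this change set.

ghost
requires 0 <= idx && idx < length && length <= len(raw)
preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R55)
preserves acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R55)
decreases
func byteWidenLemma(raw []byte, idx int, length int) {
	unfold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
	unfold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56)
	// raw[:length] shares raw's backing array, so both reads target the same location
	assert &raw[:length][idx] == &raw[idx]
	assert raw[idx] == raw[:length][idx]
	fold acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), R56)
	fold acc(sl.AbsSlice_Bytes(raw[:length], 0, len(raw[:length])), R56)
}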
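
// --- Note appended: the recursive lemmas above (asidsAfterWidenLemma,
// hopFieldsConsDirWidenLemma, hopFieldsNotConsDirWidenLemma) thread an explicit
// permission amount p with R53 < p and recurse with newP := (p + R53)/2. Halving
// towards R53 keeps every nested call strictly between R53 and the caller's
// fraction, so the recursion can always pass a valid acc(..., newP) without ever
// exhausting the permissions it holds. A minimal sketch of that skeleton follows;
// countdownLemma is hypothetical and not part of this change set.

ghost
requires R53 < p
requires 0 <= n
preserves acc(sl.AbsSlice_Bytes(raw, 0, len(raw)), p)
decreases n
func countdownLemma(raw []byte, n int, p perm) {
	if n != 0 {
		newP := (p + R53)/2
		// R53 < newP < p: the callee receives a valid, strictly smaller fraction
		assert R53 < newP && newP < p
		countdownLemma(raw, n-1, newP)
	}
}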