Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Ops vi #30

Merged
merged 5 commits into from
Apr 15, 2016
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 50 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -33,3 +33,53 @@

*~
*.swp

*.pcap

*.ts

# compiled Python
*.pyc

# Files generated by autotools
.deps
.libs
Makefile
Makefile.in
configure
config.log
config.h
aclocal.m4
autom4te.cache/
compile
config.h.in
config.status
depcomp
install-sh
missing
stamp-h1
targets/simple_router/simple_router
test-driver
.dirstamp
*.log
*.trs
config.guess
config.sub
ltmain.sh
m4/ltoptions.m4
m4/ltsugar.m4
m4/ltversion.m4
m4/lt~obsolete.m4
m4/libtool.m4
py-compile
libtool

bmswitchp4_drivers

gen-cpp
gen-py

tests/run_tests.py

switchapi/switch_api_thrift
switchsai/switch_sai_thrift
46 changes: 46 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,49 @@ switchapi - SwitchAPI
switchlink - Linux netlink listener
tests/ptf-tests - P4 dependent(PD), SAI and API tests
tests/of-tests - Openflow tests


Running switch in bmv2 without p4factory
----------------------------------------
You can now run `switch.p4` in bmv2 without cloning `p4factory`. In order to do
this you first need to install [bmv2](https://github.com/p4lang/behavioral-model)
and its compiler [p4c-bmv2](https://github.com/p4lang/p4c-bm) on your system.
Additionally, if you plan on running the tests for `switch.p4`, please make sure
you install [PTF](https://github.com/p4lang/ptf) with `sudo python setup.py install`.

Once this is done, you can follow these steps:

./autogen.sh
./configure --with-bmv2 --with-switchsai
make

The `--with-switchsai` flag will make sure that the compiled drivers include
`switchapi` and `switchsai`. If you just need `switchapi`, replace the flag with
`--with-switchapi`. Replace the flag with `--with-switchlink` if you need
`switchlink` as well. If you omit these flags, the drivers will only include the
`PD`.

Note that you should be using a fresh clone for this, not the `switch` submodule
that comes with `p4factory`.

Make sure to look at the output of `configure` to spot any missing dependency.

Once everything has compiled, you can run the tests for `switch.p4` (assuming
you have installed [PTF](https://github.com/p4lang/ptf)). Please make sure that
you have all the necessary veth pairs setup (you can use
[tools/veth_setup.sh](tools/veth_setup.sh)).

First, start the software switch with:

sudo ./bmv2/run_bm.sh

Then, start the drivers with:

sudo ./bmv2/run_drivers.sh

You can now run all the tests:

sudo ./bmv2/run_tests.sh # for the PD tests
sudo ./bmv2/run_tests.sh --test-dir tests/ptf-tests/api-tests # for the switchapi tests
sudo ./bmv2/run_tests.sh --test-dir tests/ptf-tests/sai-tests # for the switchsai tests
2 changes: 1 addition & 1 deletion p4src/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ Supported Features
4. L3 Multicast
5. LAG
6. ECMP
7. Tunneling: VXLAN and NVGRE (including L2/L3 Gateway), Geneve, and GRE
7. Tunneling: VXLAN and NVGRE (including L2/L3 Gateway), Geneve, GRE and IPinIP
8. Basic ACL: MAC and IP ACLs
9. Unicast RPF check
10. MPLS: LER, LSR, IPVPN, VPLS, L2VPN
Expand Down
29 changes: 16 additions & 13 deletions p4src/acl.p4
Original file line number Diff line number Diff line change
Expand Up @@ -33,9 +33,8 @@ header_type acl_metadata_t {
racl_nexthop_type : 1; /* ecmp or nexthop */
acl_redirect : 1; /* ifacl/vacl redirect action */
racl_redirect : 1; /* racl redirect action */
if_label : 15; /* if label for acls */
if_label : 16; /* if label for acls */
bd_label : 16; /* bd label for acls */
mirror_session_id : 10; /* mirror session id */
acl_stats_index : 14; /* acl stats index */
}
}
Expand Down Expand Up @@ -154,7 +153,9 @@ table mac_acl {

control process_mac_acl {
#if !defined(ACL_DISABLE) && !defined(L2_DISABLE)
apply(mac_acl);
if (DO_LOOKUP(ACL)) {
apply(mac_acl);
}
#endif /* !ACL_DISABLE && !L2_DISABLE */
}

Expand Down Expand Up @@ -225,15 +226,17 @@ table ipv6_acl {
/*****************************************************************************/
control process_ip_acl {
#ifndef ACL_DISABLE
if (l3_metadata.lkp_ip_type == IPTYPE_IPV4) {
if (DO_LOOKUP(ACL)) {
if (l3_metadata.lkp_ip_type == IPTYPE_IPV4) {
#ifndef IPV4_DISABLE
apply(ip_acl);
apply(ip_acl);
#endif /* IPV4_DISABLE */
} else {
if (l3_metadata.lkp_ip_type == IPTYPE_IPV6) {
} else {
if (l3_metadata.lkp_ip_type == IPTYPE_IPV6) {
#ifndef IPV6_DISABLE
apply(ipv6_acl);
apply(ipv6_acl);
#endif /* IPV6_DISABLE */
}
}
}
#endif /* ACL_DISABLE */
Expand Down Expand Up @@ -474,9 +477,7 @@ action drop_packet() {
}

action drop_packet_with_reason(drop_reason) {
#ifndef STATS_DISABLE
count(drop_stats, drop_reason);
#endif
drop();
}

Expand Down Expand Up @@ -550,9 +551,11 @@ table drop_stats {
}

control process_system_acl {
apply(system_acl);
if (ingress_metadata.drop_flag == TRUE) {
apply(drop_stats);
if (DO_LOOKUP(SYSTEM_ACL)) {
apply(system_acl);
if (ingress_metadata.drop_flag == TRUE) {
apply(drop_stats);
}
}
}

Expand Down
16 changes: 16 additions & 0 deletions p4src/archdeps.p4
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
/*
archdeps.p4
*/

#ifndef _ARCH_DEPS_H

#define _ARCH_DEPS_H

#define ingress_input_port standard_metadata.ingress_port
#define ingress_egress_port standard_metadata.egress_spec
#define egress_egress_port standard_metadata.egress_port
#define intrinsic_mcast_grp intrinsic_metadata.mcast_grp
#define egress_egress_rid intrinsic_metadata.egress_rid

#endif

4 changes: 2 additions & 2 deletions p4src/egress_filter.p4
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,13 @@ action egress_filter_check() {
egress_metadata.bd);
}

action egress_filter_drop() {
action set_egress_filter_drop() {
drop();
}

table egress_filter_drop {
actions {
egress_filter_drop;
set_egress_filter_drop;
}
}

Expand Down
108 changes: 44 additions & 64 deletions p4src/fabric.p4
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ metadata fabric_metadata_t fabric_metadata;
/* Fabric header - destination lookup */
/*****************************************************************************/
action terminate_cpu_packet() {
modify_field(standard_metadata.egress_spec,
modify_field(ingress_egress_port,
fabric_header.dstPortOrGroup);
modify_field(egress_metadata.bypass, fabric_header_cpu.txBypass);

Expand All @@ -49,7 +49,7 @@ action terminate_cpu_packet() {

#ifdef FABRIC_ENABLE
action terminate_fabric_unicast_packet() {
modify_field(standard_metadata.egress_spec,
modify_field(ingress_egress_port,
fabric_header.dstPortOrGroup);

modify_field(tunnel_metadata.tunnel_terminate,
Expand Down Expand Up @@ -85,7 +85,7 @@ action terminate_fabric_multicast_packet() {
modify_field(l3_metadata.outer_routed,
fabric_header_multicast.outerRouted);

modify_field(intrinsic_metadata.mcast_grp,
modify_field(intrinsic_mcast_grp,
fabric_header_multicast.mcastGrp);

modify_field(ethernet.etherType, fabric_payload_header.etherType);
Expand All @@ -96,7 +96,7 @@ action terminate_fabric_multicast_packet() {

action switch_fabric_multicast_packet() {
modify_field(fabric_metadata.fabric_header_present, TRUE);
modify_field(intrinsic_metadata.mcast_grp, fabric_header.dstPortOrGroup);
modify_field(intrinsic_mcast_grp, fabric_header.dstPortOrGroup);
}
#endif /* MULTICAST_DISABLE */
#endif /* FABRIC_ENABLE */
Expand Down Expand Up @@ -138,66 +138,42 @@ table fabric_ingress_src_lkp {
}
#endif /* FABRIC_ENABLE */

action terminate_inner_ethernet_non_ip_over_fabric() {
modify_field(l2_metadata.lkp_mac_sa, inner_ethernet.srcAddr);
modify_field(l2_metadata.lkp_mac_da, inner_ethernet.dstAddr);
modify_field(l2_metadata.lkp_mac_type, inner_ethernet.etherType);
action non_ip_over_fabric() {
modify_field(l2_metadata.lkp_mac_sa, ethernet.srcAddr);
modify_field(l2_metadata.lkp_mac_da, ethernet.dstAddr);
modify_field(l2_metadata.lkp_mac_type, ethernet.etherType);
}

action terminate_inner_ethernet_ipv4_over_fabric() {
modify_field(l2_metadata.lkp_mac_sa, inner_ethernet.srcAddr);
modify_field(l2_metadata.lkp_mac_da, inner_ethernet.dstAddr);
modify_field(l2_metadata.lkp_mac_type, inner_ethernet.etherType);
modify_field(ipv4_metadata.lkp_ipv4_sa, inner_ipv4.srcAddr);
modify_field(ipv4_metadata.lkp_ipv4_da, inner_ipv4.dstAddr);
modify_field(l3_metadata.lkp_ip_proto, inner_ipv4.protocol);
modify_field(l3_metadata.lkp_l4_sport, l3_metadata.lkp_inner_l4_sport);
modify_field(l3_metadata.lkp_l4_dport, l3_metadata.lkp_inner_l4_dport);
action ipv4_over_fabric() {
modify_field(l2_metadata.lkp_mac_sa, ethernet.srcAddr);
modify_field(l2_metadata.lkp_mac_da, ethernet.dstAddr);
modify_field(ipv4_metadata.lkp_ipv4_sa, ipv4.srcAddr);
modify_field(ipv4_metadata.lkp_ipv4_da, ipv4.dstAddr);
modify_field(l3_metadata.lkp_ip_proto, ipv4.protocol);
modify_field(l3_metadata.lkp_l4_sport, l3_metadata.lkp_outer_l4_sport);
modify_field(l3_metadata.lkp_l4_dport, l3_metadata.lkp_outer_l4_dport);
}

action terminate_inner_ipv4_over_fabric() {
modify_field(ipv4_metadata.lkp_ipv4_sa, inner_ipv4.srcAddr);
modify_field(ipv4_metadata.lkp_ipv4_da, inner_ipv4.dstAddr);
modify_field(l3_metadata.lkp_ip_version, inner_ipv4.version);
modify_field(l3_metadata.lkp_ip_proto, inner_ipv4.protocol);
modify_field(l3_metadata.lkp_ip_ttl, inner_ipv4.ttl);
modify_field(l3_metadata.lkp_ip_tc, inner_ipv4.diffserv);
modify_field(l3_metadata.lkp_l4_sport, l3_metadata.lkp_inner_l4_sport);
modify_field(l3_metadata.lkp_l4_dport, l3_metadata.lkp_inner_l4_dport);
action ipv6_over_fabric() {
modify_field(l2_metadata.lkp_mac_sa, ethernet.srcAddr);
modify_field(l2_metadata.lkp_mac_da, ethernet.dstAddr);
modify_field(ipv6_metadata.lkp_ipv6_sa, ipv6.srcAddr);
modify_field(ipv6_metadata.lkp_ipv6_da, ipv6.dstAddr);
modify_field(l3_metadata.lkp_ip_proto, ipv6.nextHdr);
modify_field(l3_metadata.lkp_l4_sport, l3_metadata.lkp_outer_l4_sport);
modify_field(l3_metadata.lkp_l4_dport, l3_metadata.lkp_outer_l4_dport);
}

action terminate_inner_ethernet_ipv6_over_fabric() {
modify_field(l2_metadata.lkp_mac_sa, inner_ethernet.srcAddr);
modify_field(l2_metadata.lkp_mac_da, inner_ethernet.dstAddr);
modify_field(l2_metadata.lkp_mac_type, inner_ethernet.etherType);
modify_field(ipv6_metadata.lkp_ipv6_sa, inner_ipv6.srcAddr);
modify_field(ipv6_metadata.lkp_ipv6_da, inner_ipv6.dstAddr);
modify_field(l3_metadata.lkp_ip_proto, inner_ipv6.nextHdr);
modify_field(l3_metadata.lkp_l4_sport, l3_metadata.lkp_inner_l4_sport);
modify_field(l3_metadata.lkp_l4_dport, l3_metadata.lkp_inner_l4_dport);
}

action terminate_inner_ipv6_over_fabric() {
modify_field(ipv6_metadata.lkp_ipv6_sa, inner_ipv6.srcAddr);
modify_field(ipv6_metadata.lkp_ipv6_da, inner_ipv6.dstAddr);
modify_field(l3_metadata.lkp_ip_proto, inner_ipv6.nextHdr);
modify_field(l3_metadata.lkp_l4_sport, l3_metadata.lkp_inner_l4_sport);
modify_field(l3_metadata.lkp_l4_dport, l3_metadata.lkp_inner_l4_dport);
}

table tunneled_packet_over_fabric {
table native_packet_over_fabric {
reads {
tunnel_metadata.ingress_tunnel_type : exact;
inner_ipv4 : valid;
inner_ipv6 : valid;
ipv4 : valid;
ipv6 : valid;
}
actions {
terminate_inner_ethernet_non_ip_over_fabric;
terminate_inner_ethernet_ipv4_over_fabric;
terminate_inner_ipv4_over_fabric;
non_ip_over_fabric;
ipv4_over_fabric;
#ifndef IPV6_DISABLE
terminate_inner_ethernet_ipv6_over_fabric;
terminate_inner_ipv6_over_fabric;
ipv6_over_fabric;
#endif /* IPV6_DISABLE */
}
size : 1024;
Expand All @@ -207,32 +183,36 @@ table tunneled_packet_over_fabric {
/* Ingress fabric header processing */
/*****************************************************************************/
control process_ingress_fabric {
apply(fabric_ingress_dst_lkp);
if (ingress_metadata.port_type != PORT_TYPE_NORMAL) {
apply(fabric_ingress_dst_lkp);
#ifdef FABRIC_ENABLE
if (valid(fabric_header_multicast)) {
apply(fabric_ingress_src_lkp);
}
if (tunnel_metadata.tunnel_terminate == TRUE) {
apply(tunneled_packet_over_fabric);
}
if (ingress_metadata.port_type == PORT_TYPE_FABRIC) {
if (valid(fabric_header_multicast)) {
apply(fabric_ingress_src_lkp);
}
if (tunnel_metadata.tunnel_terminate == FALSE) {
apply(native_packet_over_fabric);
}
}
#endif /* FABRIC_ENABLE */
}
}

/*****************************************************************************/
/* Fabric LAG resolution */
/*****************************************************************************/
#ifdef FABRIC_ENABLE
action set_fabric_lag_port(port) {
modify_field(standard_metadata.egress_spec, port);
modify_field(ingress_egress_port, port);
}

#ifndef MULTICAST_DISABLE
action set_fabric_multicast(fabric_mgid) {
modify_field(multicast_metadata.mcast_grp, intrinsic_metadata.mcast_grp);
modify_field(multicast_metadata.mcast_grp, intrinsic_mcast_grp);

#ifdef FABRIC_NO_LOCAL_SWITCHING
// no local switching, reset fields to send packet on fabric mgid
modify_field(intrinsic_metadata.mcast_grp, fabric_mgid);
modify_field(intrinsic_mcast_grp, fabric_mgid);
#endif /* FABRIC_NO_LOCAL_SWITCHING */
}
#endif /* MULTICAST_DISABLE */
Expand Down
Loading