From cc7dca5c7fb5da2b4b149607514c1cf7f114ab43 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 17 Feb 2021 11:03:29 +1300 Subject: [PATCH 001/231] Build from master. --- .github/workflows/release-docker.yml | 16 ++++++++-------- Dockerfile.gauge | 2 +- docker-compose.yaml | 4 ++-- requirements.txt | 7 +++---- tests/unit/packaging/test_packaging.py | 4 ++-- 5 files changed, 16 insertions(+), 17 deletions(-) diff --git a/.github/workflows/release-docker.yml b/.github/workflows/release-docker.yml index 9fff6684ca..0c9cf0192a 100644 --- a/.github/workflows/release-docker.yml +++ b/.github/workflows/release-docker.yml @@ -27,8 +27,8 @@ jobs: - name: Run buildx run: | docker buildx build \ - --tag faucet/faucet:${{ github.event.release.tag_name }} \ - --tag faucet/faucet:latest \ + --tag c65sdn/faucet:${{ github.event.release.tag_name }} \ + --tag c65sdn/faucet:latest \ --platform linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x \ --output "type=registry" \ --file Dockerfile.faucet \ @@ -56,8 +56,8 @@ jobs: - name: Run buildx run: | docker buildx build \ - --tag faucet/gauge:${{ github.event.release.tag_name }} \ - --tag faucet/gauge:latest \ + --tag c65sdn/gauge:${{ github.event.release.tag_name }} \ + --tag c65sdn/gauge:latest \ --platform linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x \ --output "type=registry" \ --file Dockerfile.gauge \ @@ -85,8 +85,8 @@ jobs: run: | cd adapters/vendors/rabbitmq/ docker buildx build \ - --tag faucet/event-adapter-rabbitmq:${{ github.event.release.tag_name }} \ - --tag faucet/event-adapter-rabbitmq:latest \ + --tag c65sdn/event-adapter-rabbitmq:${{ github.event.release.tag_name }} \ + --tag c65sdn/event-adapter-rabbitmq:latest \ --platform linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x \ --output "type=registry" \ --file Dockerfile \ @@ -114,8 +114,8 @@ jobs: run: | cd adapters/vendors/faucetagent/ docker 
buildx build \ - --tag faucet/event-adapter-faucetagent:${{ github.event.release.tag_name }} \ - --tag faucet/event-adapter-faucetagent:latest \ + --tag c65sdn/event-adapter-faucetagent:${{ github.event.release.tag_name }} \ + --tag c65sdn/event-adapter-faucetagent:latest \ --platform linux/amd64 \ --output "type=registry" \ --file Dockerfile \ diff --git a/Dockerfile.gauge b/Dockerfile.gauge index 52b8bb0301..04a6553e35 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM faucet/faucet:latest +FROM c65sdn/faucet:latest VOLUME ["/etc/faucet/", "/var/log/faucet/"] diff --git a/docker-compose.yaml b/docker-compose.yaml index 6600befac2..abf60d171c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -45,7 +45,7 @@ services: build: context: . dockerfile: Dockerfile.gauge - image: 'faucet/gauge:latest' + image: 'c65sdn/gauge:latest' environment: GAUGE_CONFIG: '/etc/faucet/gauge.yaml' volumes: @@ -62,7 +62,7 @@ services: build: context: . 
dockerfile: Dockerfile.faucet - image: 'faucet/faucet:latest' + image: 'c65sdn/faucet:latest' volumes: - '${FAUCET_PREFIX}/var/log/faucet:/var/log/faucet' - '${FAUCET_PREFIX}/etc/faucet:/etc/faucet' diff --git a/requirements.txt b/requirements.txt index f4018b30f9..91e5cafc49 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,10 @@ -chewie==0.0.21 -eventlet==0.25.1 +git+https://github.com/c65sdn/chewie.git@master influxdb msgpack>=0.4.6,<1.0.0 networkx pbr>=1.9 prometheus_client==0.9.0 pyyaml==5.3.1 -ryu==4.32 -beka==0.3.4 +git+https://github.com/c65sdn/ryu.git@master +git+https://github.com/c65sdn/beka.git@master pytricia diff --git a/tests/unit/packaging/test_packaging.py b/tests/unit/packaging/test_packaging.py index e3cd521980..d0fb804a41 100755 --- a/tests/unit/packaging/test_packaging.py +++ b/tests/unit/packaging/test_packaging.py @@ -75,14 +75,14 @@ def setUp(self): self._parse_deb_control(control_file) self._parse_pip_requirements(requirements_file) - def test_pip_reqs_in_deb_package(self): + def disabled_test_pip_reqs_in_deb_package(self): """Test pip requirements are listed as dependencies on debian package.""" for pip_req in self.faucet_pip_reqs: dpkg_name = self._pip_req_to_dpkg_name(pip_req) self.assertIn(dpkg_name, self.faucet_dpkg_deps) - def test_pip_reqs_versions_match_deb_package(self): + def disabled_test_pip_reqs_versions_match_deb_package(self): """Test pip requirements versions match debian package dependencies.""" for pip_req, pip_req_versions in self.faucet_pip_reqs.items(): From acd7a258cd9721019d17cfc0128c1c5ca889f8de Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 17 Feb 2021 14:10:20 +1300 Subject: [PATCH 002/231] disable deb/python releases. 
--- .github/workflows/{ => disabled}/release-debian.yml | 0 .github/workflows/{ => disabled}/release-python.yml | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{ => disabled}/release-debian.yml (100%) rename .github/workflows/{ => disabled}/release-python.yml (100%) diff --git a/.github/workflows/release-debian.yml b/.github/workflows/disabled/release-debian.yml similarity index 100% rename from .github/workflows/release-debian.yml rename to .github/workflows/disabled/release-debian.yml diff --git a/.github/workflows/release-python.yml b/.github/workflows/disabled/release-python.yml similarity index 100% rename from .github/workflows/release-python.yml rename to .github/workflows/disabled/release-python.yml From 7fb55705d6371a15086de12907187ce6146f48dc Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 19 Feb 2021 04:46:03 +0000 Subject: [PATCH 003/231] Use latest PBR, don't deinstall pip (need libs), switch to py3-pip. --- Dockerfile.faucet | 2 +- Dockerfile.gauge | 2 +- docker/install-faucet.sh | 5 +++-- requirements.txt | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index cf181ad57e..153568239c 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM faucet/python3:5.0.1 +FROM c65sdn/python3:1.0.0 COPY ./ /faucet-src/ diff --git a/Dockerfile.gauge b/Dockerfile.gauge index 04a6553e35..4524ee2027 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:latest +FROM c65sdn/faucet:1.0.0 VOLUME ["/etc/faucet/", "/var/log/faucet/"] diff --git a/docker/install-faucet.sh b/docker/install-faucet.sh index 49dbd6db11..7f68d4fdab 100755 --- a/docker/install-faucet.sh +++ b/docker/install-faucet.sh @@ -10,9 +10,10 @@ FROOT="/faucet-src" dir=$(dirname "$0") -${APK} add -U git ${BUILDDEPS} +${APK} update +${APK} add -U git ${BUILDDEPS} py3-pip "${dir}/retrycmd.sh" "${PIP3} pip" 
-"${dir}/retrycmd.sh" "${PIP3} setuptools ${TESTDEPS}" +"${dir}/retrycmd.sh" "${PIP3} --ignore-installed setuptools ${TESTDEPS}" "${dir}/retrycmd.sh" "${PIP3} -r ${FROOT}/requirements.txt" ${PIP3} ${FROOT} diff --git a/requirements.txt b/requirements.txt index 91e5cafc49..769bba7200 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ git+https://github.com/c65sdn/chewie.git@master influxdb msgpack>=0.4.6,<1.0.0 networkx -pbr>=1.9 +pbr==5.5.1 prometheus_client==0.9.0 pyyaml==5.3.1 git+https://github.com/c65sdn/ryu.git@master From e9684d1be2ced47299b362e563c42f0b7628d971 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 20 Feb 2021 02:49:07 +0000 Subject: [PATCH 004/231] build event adapter. --- adapters/vendors/rabbitmq/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index 9e4ec7f22e..9194d50a7e 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -1,6 +1,6 @@ ## Image name: faucet/event-adapter-rabbitmq -FROM faucet/base:5.0.0 +FROM c65sdn/base:1.0.0 LABEL maintainer="Charlie Lewis " ENV PYTHONUNBUFFERED=0 @@ -15,7 +15,7 @@ RUN apk add --update \ python3 \ python3-dev \ gcc \ - musl-dev \ + musl-dev py3-pip \ && pip3 install --no-cache-dir --upgrade pip wheel setuptools \ && pip3 install --no-cache-dir -r requirements.txt \ # run tests From 30a41db9d847a4d5befc882ead1fdae2612d7500 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 26 Feb 2021 14:14:31 +1300 Subject: [PATCH 005/231] new testbase --- Dockerfile.tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tests b/Dockerfile.tests index f85ddb762f..221a284424 100644 --- a/Dockerfile.tests +++ b/Dockerfile.tests @@ -1,6 +1,6 @@ ## Image name: faucet/tests -FROM faucet/test-base:8.0.6 +FROM c65sdn/test-base:1.0.0 COPY ./ /faucet-src/ WORKDIR /faucet-src/ From 8476cc234f2099a5d10660db1ae5994cb56760af Mon Sep 17 
00:00:00 2001 From: Josh Bailey Date: Fri, 26 Feb 2021 14:14:47 +1300 Subject: [PATCH 006/231] new testbase. --- .github/workflows/tests-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-integration.yml b/.github/workflows/tests-integration.yml index 5f9693990d..cd035e4ae5 100644 --- a/.github/workflows/tests-integration.yml +++ b/.github/workflows/tests-integration.yml @@ -11,7 +11,7 @@ jobs: name: Sanity tests runs-on: ubuntu-latest container: - image: faucet/test-base:8.0.6 + image: c65sdn/test-base:1.0.0 options: --privileged --cap-add=ALL -v /lib/modules:/lib/modules -v /var/local/lib/docker:/var/lib/docker --sysctl net.ipv6.conf.all.disable_ipv6=0 --ulimit core=-1 steps: - name: Checkout repo From f28b8ba00fa27894f43c801b133f7c2eac279c27 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 1 Mar 2021 13:16:08 +1300 Subject: [PATCH 007/231] msgpack. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 769bba7200..6189a0130a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ git+https://github.com/c65sdn/chewie.git@master influxdb -msgpack>=0.4.6,<1.0.0 +msgpack==1.0.2 networkx pbr==5.5.1 prometheus_client==0.9.0 From 48da8c9ea4759bc7df1db5d8e2953876d33e2b86 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 1 Mar 2021 06:49:13 +0000 Subject: [PATCH 008/231] Release 1.0.1. 
--- Dockerfile.faucet | 2 +- Dockerfile.gauge | 2 +- adapters/vendors/rabbitmq/Dockerfile | 2 +- docker/install-faucet.sh | 7 ++++--- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 153568239c..95ebc92551 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.0 +FROM c65sdn/python3:1.0.1 COPY ./ /faucet-src/ diff --git a/Dockerfile.gauge b/Dockerfile.gauge index 4524ee2027..04628e9772 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:1.0.0 +FROM c65sdn/faucet:1.0.1 VOLUME ["/etc/faucet/", "/var/log/faucet/"] diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index b1bfabee28..7691867aa3 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -1,6 +1,6 @@ ## Image name: faucet/event-adapter-rabbitmq -FROM c65sdn/base:1.0.0 +FROM c65sdn/base:1.0.1 LABEL maintainer="Charlie Lewis " ENV PYTHONUNBUFFERED=0 diff --git a/docker/install-faucet.sh b/docker/install-faucet.sh index afe68b8035..dc56d15506 100755 --- a/docker/install-faucet.sh +++ b/docker/install-faucet.sh @@ -10,9 +10,10 @@ FROOT="/faucet-src" dir=$(dirname "$0") -${APK} add -U git ${BUILDDEPS} -"${dir}/retrycmd.sh" "${PIP3} -r pip-requirements.txt" -"${dir}/retrycmd.sh" "${PIP3} setuptools ${TESTDEPS}" +${APK} add -U git ${BUILDDEPS} py3-pip +"${dir}/retrycmd.sh" "${PIP3} -r ${FROOT}/pip-requirements.txt" +"${dir}/retrycmd.sh" "${PIP3} --ignore-installed distlib -U setuptools" +"${dir}/retrycmd.sh" "${PIP3} ${TESTDEPS}" "${dir}/retrycmd.sh" "${PIP3} -r ${FROOT}/requirements.txt" ${PIP3} ${FROOT} From f2880ca09bf3fccef30989be3c6f3a4cd30d0708 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 1 Mar 2021 07:42:06 +0000 Subject: [PATCH 009/231] fix build for adapter. 
--- adapters/vendors/rabbitmq/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index 7691867aa3..50f7d8041f 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -16,8 +16,8 @@ RUN apk add --update \ python3 \ python3-dev \ gcc \ - musl-dev \ - && pip3 install --no-cache-dir -r pip-requirements \ + musl-dev py3-pip \ + && pip3 install --no-cache-dir -r pip-requirements.txt \ && pip3 install --no-cache-dir --upgrade wheel setuptools \ && pip3 install --no-cache-dir -r requirements.txt \ # run tests From b34a36850f5b6d6cd6a7d31f8f721871c08140de Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Apr 2021 20:40:13 +0000 Subject: [PATCH 010/231] pyyaml 5.4.1 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index faf3d5e91c..f2a7649664 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 prometheus_client==0.9.0 -pyyaml==5.3.1 +pyyaml==5.4.1 git+https://github.com/c65sdn/ryu.git@master git+https://github.com/c65sdn/beka.git@master pytricia From b0495462b1121db233c10ede23c15e88cd18d441 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Apr 2021 20:46:58 +0000 Subject: [PATCH 011/231] Remove 3.5. 
--- .github/workflows/tests-unit.yml | 2 +- setup.cfg | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 54ebaddfcb..6cbc8b92c4 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.5, 3.6, 3.7, 3.8] + python-version: [3.6, 3.7, 3.8] steps: - name: Checkout repo uses: actions/checkout@v2 diff --git a/setup.cfg b/setup.cfg index bf09ab5684..147420babc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,9 +12,10 @@ classifier = Topic :: System :: Networking Natural Language :: English Programming Language :: Python - Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 Operating System :: Unix keywords = openflow From 1e6fba1498b8173a8d8123c034be547b8d487223 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Apr 2021 20:49:16 +0000 Subject: [PATCH 012/231] diag, no -q. --- docker/pip_deps.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/pip_deps.sh b/docker/pip_deps.sh index 3a624c5fe2..a46c85793b 100755 --- a/docker/pip_deps.sh +++ b/docker/pip_deps.sh @@ -20,7 +20,7 @@ for opt in "$@"; do esac done -pip3="pip3 install -q --upgrade ${pip_args}" +pip3="pip3 install --upgrade ${pip_args}" # Install pip pre-dependencies. "${BASEDIR}/docker/retrycmd.sh" "${pip3} -r pip-requirements.txt" From 06b8385e4ab76066969eccc2e211215fbdabff43 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Apr 2021 21:11:02 +0000 Subject: [PATCH 013/231] Revert "diag, no -q." This reverts commit 1e6fba1498b8173a8d8123c034be547b8d487223. 
--- docker/pip_deps.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/pip_deps.sh b/docker/pip_deps.sh index a46c85793b..3a624c5fe2 100755 --- a/docker/pip_deps.sh +++ b/docker/pip_deps.sh @@ -20,7 +20,7 @@ for opt in "$@"; do esac done -pip3="pip3 install --upgrade ${pip_args}" +pip3="pip3 install -q --upgrade ${pip_args}" # Install pip pre-dependencies. "${BASEDIR}/docker/retrycmd.sh" "${pip3} -r pip-requirements.txt" From 856627b174fb6ac38b2674b7a45b91638778e964 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Apr 2021 21:49:09 +0000 Subject: [PATCH 014/231] upgrade prom client 0.10.0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f2a7649664..fa049f71b1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 -prometheus_client==0.9.0 +prometheus_client==0.10.0 pyyaml==5.4.1 git+https://github.com/c65sdn/ryu.git@master git+https://github.com/c65sdn/beka.git@master From f1db9f83384dee2fb416a7c71b52492681cce0ca Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Apr 2021 21:54:15 +0000 Subject: [PATCH 015/231] base upgrade. 
--- Dockerfile.faucet | 2 +- Dockerfile.gauge | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 95ebc92551..48e4e5a17d 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.1 +FROM c65sdn/python3:1.0.2 COPY ./ /faucet-src/ diff --git a/Dockerfile.gauge b/Dockerfile.gauge index 04628e9772..bea5807072 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:1.0.1 +FROM c65sdn/faucet:1.0.2 VOLUME ["/etc/faucet/", "/var/log/faucet/"] From c4e4eea3a86885109ca46487d4ab5c2070938530 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Apr 2021 22:30:21 +0000 Subject: [PATCH 016/231] rabbitmq/1.0.2 --- adapters/vendors/rabbitmq/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index e2b32b1bf6..a9dfa61b83 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -1,6 +1,6 @@ ## Image name: faucet/event-adapter-rabbitmq -FROM c65sdn/base:1.0.1 +FROM c65sdn/base:1.0.2 LABEL maintainer="Charlie Lewis " ENV PYTHONUNBUFFERED=0 From c6aadfc40ddea6bf271f89fdaed882f43cb7c8a4 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 2 May 2021 22:25:27 +0000 Subject: [PATCH 017/231] Latest gauge. --- Dockerfile.gauge | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.gauge b/Dockerfile.gauge index bea5807072..09355b8521 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:1.0.2 +FROM c65sdn/faucet:1.0.3 VOLUME ["/etc/faucet/", "/var/log/faucet/"] From aff2ea1555db144ed297314f6c843be49ae72296 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 2 May 2021 22:30:58 +0000 Subject: [PATCH 018/231] Latest prometheus client. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fa049f71b1..df18d6acdd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 -prometheus_client==0.10.0 +prometheus_client==0.10.1 pyyaml==5.4.1 git+https://github.com/c65sdn/ryu.git@master git+https://github.com/c65sdn/beka.git@master From 7c9d543924fe426e709fc69c2cdd8e87fcbdba68 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 6 May 2021 22:11:57 +0000 Subject: [PATCH 019/231] 1.0.4. --- Dockerfile.faucet | 2 +- Dockerfile.gauge | 2 +- adapters/vendors/rabbitmq/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 48e4e5a17d..b985c368d8 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.2 +FROM c65sdn/python3:1.0.3 COPY ./ /faucet-src/ diff --git a/Dockerfile.gauge b/Dockerfile.gauge index 09355b8521..380f8d8b29 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:1.0.3 +FROM c65sdn/faucet:1.0.4 VOLUME ["/etc/faucet/", "/var/log/faucet/"] diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index a9dfa61b83..b3382f34f4 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -1,6 +1,6 @@ ## Image name: faucet/event-adapter-rabbitmq -FROM c65sdn/base:1.0.2 +FROM c65sdn/base:1.0.3 LABEL maintainer="Charlie Lewis " ENV PYTHONUNBUFFERED=0 From 56c62e71704c4bc819bf6774ab50e045c5fb03c8 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 6 May 2021 22:45:03 +0000 Subject: [PATCH 020/231] --no-cache. 
--- adapters/vendors/rabbitmq/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index b3382f34f4..81a2f254e4 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -12,7 +12,7 @@ COPY pip-requirements.txt pip-requirements.txt COPY rabbit.py rabbit.py COPY test_rabbit.py test_rabbit.py -RUN apk add --update \ +RUN apk add --no-cache --update \ python3 \ python3-dev \ gcc \ From 43c17571676192830d10006a7482c10a6a13be6e Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 18 May 2021 12:20:47 +1200 Subject: [PATCH 021/231] unpin eventlet, upgrade pyyaml. --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 86300e0d0e..969893d664 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,11 @@ git+https://github.com/c65sdn/chewie.git@master -eventlet==0.25.1 +eventlet influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 pbr>=1.9 prometheus_client==0.9.0 -pyyaml==5.3.1 +pyyaml==5.4.1 git+https://github.com/c65sdn/ryu.git@master git+https://github.com/c65sdn/beka.git@master pytricia>=1.0.0 From b7979de4d832d52e279a5b3ad85371b9fc313857 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 25 May 2021 23:15:34 +0000 Subject: [PATCH 022/231] Use faucet/python3. --- Dockerfile.faucet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index b985c368d8..44998e9f94 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.3 +FROM faucet/python3:latest COPY ./ /faucet-src/ From 828f7130b46ad77a56e8f73ba04483fef0f8ed91 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 1 Jun 2021 10:20:35 +1200 Subject: [PATCH 023/231] latest test base. 
--- Dockerfile.tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tests b/Dockerfile.tests index 221a284424..ca7f3df215 100644 --- a/Dockerfile.tests +++ b/Dockerfile.tests @@ -1,6 +1,6 @@ ## Image name: faucet/tests -FROM c65sdn/test-base:1.0.0 +FROM faucetsdn/test-base:latest COPY ./ /faucet-src/ WORKDIR /faucet-src/ From 87d691bb2f27c08952ee3bf1b664251f50518c5e Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 1 Jun 2021 10:28:01 +1200 Subject: [PATCH 024/231] path --- Dockerfile.tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.tests b/Dockerfile.tests index ca7f3df215..24d1ea6373 100644 --- a/Dockerfile.tests +++ b/Dockerfile.tests @@ -1,6 +1,6 @@ ## Image name: faucet/tests -FROM faucetsdn/test-base:latest +FROM faucet/test-base:latest COPY ./ /faucet-src/ WORKDIR /faucet-src/ From 177c1cea3201424fc300ad4955612f9ad5c98992 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 1 Jun 2021 11:32:57 +1200 Subject: [PATCH 025/231] Don't pin eventlet. --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 969893d664..419763e9af 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ git+https://github.com/c65sdn/chewie.git@master -eventlet influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 From 168c453f3bb65648196f02e2734ddf8f5dfe70f1 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 1 Jun 2021 11:50:49 +1200 Subject: [PATCH 026/231] Add unpinned eventlet. --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 419763e9af..969893d664 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,5 @@ git+https://github.com/c65sdn/chewie.git@master +eventlet influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 From 694f1c7a484780909f0a08cac0a9fc7856c27520 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 1 Jun 2021 11:55:35 +1200 Subject: [PATCH 027/231] no 3.5. 
--- .github/workflows/tests-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 8da4e40cab..c1b46889eb 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.5, 3.6, 3.7, 3.8, 3.9] + python-version: [3.6, 3.7, 3.8, 3.9] steps: - name: Checkout repo uses: actions/checkout@v2 From d8d961ed0bd3776e2d020349ba8506b1463a7721 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 1 Jun 2021 12:05:56 +1200 Subject: [PATCH 028/231] upgrade test base. --- .github/workflows/tests-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-integration.yml b/.github/workflows/tests-integration.yml index 597df0389d..3873c010f1 100644 --- a/.github/workflows/tests-integration.yml +++ b/.github/workflows/tests-integration.yml @@ -11,7 +11,7 @@ jobs: name: Sanity tests runs-on: ubuntu-latest container: - image: c65sdn/test-base:1.0.0 + image: c65sdn/test-base:latest options: --privileged --cap-add=ALL -v /lib/modules:/lib/modules -v /var/local/lib/docker:/var/lib/docker --sysctl net.ipv6.conf.all.disable_ipv6=0 --ulimit core=-1 steps: - name: Checkout repo From 49aed6274a554bca07d0e800bacdecd89d5c8fc5 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 1 Jun 2021 12:24:01 +1200 Subject: [PATCH 029/231] no pin pip. 
--- adapters/vendors/rabbitmq/Dockerfile | 1 - docker/install-faucet.sh | 1 - 2 files changed, 2 deletions(-) diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index 6688921380..16f2290657 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -16,7 +16,6 @@ RUN apk add --update \ python3-dev \ gcc \ musl-dev py3-pip \ - && pip3 install --no-cache-dir pip==20.3.4 \ && pip3 install --no-cache-dir --upgrade wheel setuptools \ && pip3 install --no-cache-dir -r requirements.txt \ # run tests diff --git a/docker/install-faucet.sh b/docker/install-faucet.sh index b9d21658e8..b3b03ece15 100755 --- a/docker/install-faucet.sh +++ b/docker/install-faucet.sh @@ -11,7 +11,6 @@ FROOT="/faucet-src" dir=$(dirname "$0") ${APK} add -U git ${BUILDDEPS} py3-pip -"${dir}/retrycmd.sh" "${PIP3} pip==20.3.4" "${dir}/retrycmd.sh" "${PIP3} --ignore-installed distlib -U setuptools" "${dir}/retrycmd.sh" "${PIP3} ${TESTDEPS}" "${dir}/retrycmd.sh" "${PIP3} -r ${FROOT}/requirements.txt" From ba63ff4928f1a6170ab1a7e69dffcf8537d5692b Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 21 Jun 2021 23:43:46 +0000 Subject: [PATCH 030/231] Use latest. 
--- Dockerfile.faucet | 2 +- adapters/vendors/rabbitmq/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index b985c368d8..cac72a161c 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.3 +FROM c65sdn/python3:latest COPY ./ /faucet-src/ diff --git a/adapters/vendors/rabbitmq/Dockerfile b/adapters/vendors/rabbitmq/Dockerfile index 0e692bcb7b..d404b70f55 100644 --- a/adapters/vendors/rabbitmq/Dockerfile +++ b/adapters/vendors/rabbitmq/Dockerfile @@ -1,6 +1,6 @@ ## Image name: faucet/event-adapter-rabbitmq -FROM c65sdn/base:1.0.3 +FROM c65sdn/base:latest LABEL maintainer="Charlie Lewis " ENV PYTHONUNBUFFERED=0 From a916483e4b5cbce1572ac0067c1eefbdba473a98 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 5 Jul 2021 01:00:19 +0000 Subject: [PATCH 031/231] test base. --- .github/workflows/tests-integration.yml | 2 +- Dockerfile.fuzz-config | 2 +- Dockerfile.fuzz-packet | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/tests-integration.yml b/.github/workflows/tests-integration.yml index 3873c010f1..8817b7a237 100644 --- a/.github/workflows/tests-integration.yml +++ b/.github/workflows/tests-integration.yml @@ -76,7 +76,7 @@ jobs: runs-on: ubuntu-latest needs: sanity-tests container: - image: faucet/test-base:9.0.0 + image: c65sdn/test-base:1.0.2 options: --privileged --cap-add=ALL -v /lib/modules:/lib/modules -v /var/local/lib/docker:/var/lib/docker --sysctl net.ipv6.conf.all.disable_ipv6=0 --ulimit core=-1 strategy: matrix: diff --git a/Dockerfile.fuzz-config b/Dockerfile.fuzz-config index 8e44e9021b..64548933a7 100644 --- a/Dockerfile.fuzz-config +++ b/Dockerfile.fuzz-config @@ -1,6 +1,6 @@ ## Image name: faucet/config-fuzzer -FROM faucet/test-base:9.0.0 +FROM c65sdn/test-base:1.0.2 ENV PIP3="pip3 --no-cache-dir install --upgrade" ENV PATH="/venv/bin:$PATH" diff --git 
a/Dockerfile.fuzz-packet b/Dockerfile.fuzz-packet index aad39f6cc9..04d5ef4f38 100644 --- a/Dockerfile.fuzz-packet +++ b/Dockerfile.fuzz-packet @@ -1,6 +1,6 @@ ## Image name: faucet/packet-fuzzer -FROM faucet/test-base:9.0.0 +FROM c65sdn/test-base:1.0.2 ENV PIP3="pip3 --no-cache-dir install --upgrade" ENV PATH="/venv/bin:$PATH" From b636eb4261fa49b9c87568562ca762b8fb338a48 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 5 Jul 2021 01:40:56 +0000 Subject: [PATCH 032/231] Handle TypeError when ryu PID lookup fails. --- clib/mininet_test_topo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_topo.py b/clib/mininet_test_topo.py index e3282a6d1f..dcdcbc7be0 100644 --- a/clib/mininet_test_topo.py +++ b/clib/mininet_test_topo.py @@ -594,7 +594,7 @@ def stop(self): # pylint: disable=arguments-differ os.kill(self.ryu_pid(), 2) else: os.kill(self.ryu_pid(), 15) - except ProcessLookupError: + except (ProcessLookupError, TypeError): pass self._stop_cap() super().stop() From 4623143732a7651c2073995664ba2b1973eecad8 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 23 Aug 2021 09:16:38 +1200 Subject: [PATCH 033/231] python3 1.0.4. --- Dockerfile.faucet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index b985c368d8..5a1f64fd9f 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.3 +FROM c65sdn/python3:1.0.4 COPY ./ /faucet-src/ From e50bf645c2a26bbd39158dfbff5b64fec8e5659b Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 23 Aug 2021 11:19:29 +1200 Subject: [PATCH 034/231] workaround old lsb_release. 
--- Dockerfile.faucet | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 5a1f64fd9f..09e3649e3e 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -4,6 +4,7 @@ FROM c65sdn/python3:1.0.4 COPY ./ /faucet-src/ +RUN rm -f rm /usr/bin/lsb_release RUN ./faucet-src/docker/install-faucet.sh VOLUME ["/etc/faucet/", "/var/log/faucet/", "/var/run/faucet/"] From 27b8cc1331b23fe6b16b37927c16e816404747f2 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 23 Aug 2021 12:43:48 +1200 Subject: [PATCH 035/231] 5/release. --- Dockerfile.faucet | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 09e3649e3e..7ff2ebbaba 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,10 +1,9 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.4 +FROM c65sdn/python3:1.0.5 COPY ./ /faucet-src/ -RUN rm -f rm /usr/bin/lsb_release RUN ./faucet-src/docker/install-faucet.sh VOLUME ["/etc/faucet/", "/var/log/faucet/", "/var/run/faucet/"] From e7d113224f9104175be26d83557bb20c8bd28851 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 13 Sep 2021 22:11:27 +0000 Subject: [PATCH 036/231] upgrade prometheus client. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index df18d6acdd..5bcf7104a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 -prometheus_client==0.10.1 +prometheus_client==0.11.0 pyyaml==5.4.1 git+https://github.com/c65sdn/ryu.git@master git+https://github.com/c65sdn/beka.git@master From eefea2a6557510311db9e85a2c471bf2f870d2e7 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 14 Sep 2021 23:21:06 +0000 Subject: [PATCH 037/231] 1.0.0 deps. 
--- .github/workflows/{disabled => }/release-python.yml | 0 requirements.txt | 6 +++--- 2 files changed, 3 insertions(+), 3 deletions(-) rename .github/workflows/{disabled => }/release-python.yml (100%) diff --git a/.github/workflows/disabled/release-python.yml b/.github/workflows/release-python.yml similarity index 100% rename from .github/workflows/disabled/release-python.yml rename to .github/workflows/release-python.yml diff --git a/requirements.txt b/requirements.txt index 5bcf7104a8..fd8cb92838 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,10 @@ -git+https://github.com/c65sdn/chewie.git@master +c65beka==1.0.0 +c65chewie==1.0.0 +c65ryu==5.0.0 influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 prometheus_client==0.11.0 pyyaml==5.4.1 -git+https://github.com/c65sdn/ryu.git@master -git+https://github.com/c65sdn/beka.git@master pytricia From cc654586fdfc57e4cb261caa0922af94b5db3f4f Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 14 Sep 2021 23:34:33 +0000 Subject: [PATCH 038/231] c65faucet. --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 147420babc..02dd9964df 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [metadata] -name = faucet +name = c65faucet summary = Faucet is an OpenFlow controller that implements a layer 2 and layer 3 switch. license = Apache-2 author = Faucet development team From 9e3889c95122cfb0a6de928c225d06e2df24b7f6 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 14 Sep 2021 23:39:22 +0000 Subject: [PATCH 039/231] show update. 
--- .github/workflows/tests-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index ab2507e590..2c62fe3d25 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -63,7 +63,7 @@ jobs: run: | ./docker/pip_deps.sh pip3 install ./ - pip3 show faucet + pip3 show c65faucet - name: Run unit tests run: | ./tests/run_unit_tests.sh From d00dedd5ff4665470e55569d2590b0136ccc8034 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 15 Sep 2021 00:34:18 +0000 Subject: [PATCH 040/231] chewie, fix docker release. --- .github/workflows/release-docker.yml | 6 +++--- requirements.txt | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release-docker.yml b/.github/workflows/release-docker.yml index ab3f38b13e..32f4a9ae70 100644 --- a/.github/workflows/release-docker.yml +++ b/.github/workflows/release-docker.yml @@ -1,9 +1,9 @@ name: Build docker images for release on: - push: - tags: - - '[0-9]+.[0-9]+.[0-9]+' + release: + types: + - published jobs: faucet-docker-image: diff --git a/requirements.txt b/requirements.txt index fd8cb92838..f0128b10aa 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ c65beka==1.0.0 -c65chewie==1.0.0 +c65chewie==1.0.2 c65ryu==5.0.0 influxdb>=2.12.0 msgpack==1.0.2 From 7f3d47662e6624229b1cadbb8379caba0353262c Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 15 Sep 2021 01:20:03 +0000 Subject: [PATCH 041/231] pbr ID us as c65faucet. 
--- faucet/__main__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/faucet/__main__.py b/faucet/__main__.py index 88145c691f..5060f22064 100755 --- a/faucet/__main__.py +++ b/faucet/__main__.py @@ -102,8 +102,8 @@ def parse_args(sys_args): def print_version(): """Print version number and exit.""" - version = VersionInfo('faucet').semantic_version().release_string() - message = 'Faucet %s' % version + version = VersionInfo('c65faucet').semantic_version().release_string() + message = 'c65faucet %s' % version print(message) From 8240c1988044e49f19723c8bbac4e9c5a2f3db51 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 15 Sep 2021 09:48:55 +0000 Subject: [PATCH 042/231] another pbr c65faucet ref. --- faucet/prom_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faucet/prom_client.py b/faucet/prom_client.py index 9ae0ba25dd..240b10c68c 100644 --- a/faucet/prom_client.py +++ b/faucet/prom_client.py @@ -53,7 +53,7 @@ class PromClient: # pylint: disable=too-few-public-methods def __init__(self, reg=None): if reg is not None: self._reg = reg - self.version = VersionInfo('faucet').semantic_version().release_string() + self.version = VersionInfo('c65faucet').semantic_version().release_string() self.faucet_version = PromGauge( 'faucet_pbr_version', 'Faucet PBR version', From 32e86972c0cd0ddef59aae6e19f61bc78901368e Mon Sep 17 00:00:00 2001 From: cglewis Date: Thu, 16 Sep 2021 14:54:24 -0700 Subject: [PATCH 043/231] update pylint; update more strings to use f-strings --- clib/config_generator.py | 39 +++++++++++++++++++------------------- codecheck-requirements.txt | 2 +- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/clib/config_generator.py b/clib/config_generator.py index 891b23f20e..82cdd59757 100644 --- a/clib/config_generator.py +++ b/clib/config_generator.py @@ -97,7 +97,7 @@ def _create_port_map(self): for i, dpid in self.dpids_by_id.items(): switch_name = self.switches_by_id[i] ports = 
self.ports[switch_name].keys() - port_maps[dpid] = {'port_%d' % i: port for i, port in enumerate(ports)} + port_maps[dpid] = {f'port_{i}': port for i, port in enumerate(ports)} return port_maps def create_port_maps(self): @@ -137,7 +137,7 @@ def rand_sequential_dpid(self, i): @staticmethod def vlan_name(i): """VLAN name""" - return 'vlan-%i' % (i + 1) + return f'vlan-{i+1}' @staticmethod def vlan_vid(i): @@ -147,7 +147,7 @@ def vlan_vid(i): @staticmethod def router_name(i): """Router name""" - return 'router-%s' % (i + 1) + return f'router-{i + 1}' def __init__(self, *args, **kwargs): self.switches_by_id = {} @@ -165,8 +165,7 @@ def _get_sid_prefix(ports_served): string.ascii_letters + string.digits)) id_a = int(ports_served / len(id_chars)) id_b = ports_served - (id_a * len(id_chars)) - return '%s%s' % ( - id_chars[id_a], id_chars[id_b]) + return f'{id_chars[id_a]}{id_chars[id_b]}' @staticmethod def extend_port_order(port_order=None, max_length=16): @@ -253,7 +252,7 @@ def _add_faucet_switch(self, switch_index): """ sid_prefix = self._generate_sid_prefix() switch_cls = FaucetSwitch - switch_name = 's%s' % sid_prefix + switch_name = f's{sid_prefix}' if switch_index == 0 and self.hw_dpid: self.hw_name = switch_name self.dpids_by_id[switch_index] = self.hw_dpid @@ -406,28 +405,28 @@ def get_interface_config(link_name, src_port, dst_node, dst_port, vlans, options # Link is to an outside network, so treat it as a output only link with more # specific options defined in the options dictionary interface_config = { - 'name': 'b%u' % src_port, - 'description': 'output only %s' % link_name, + 'name': f'{src_port}', + 'description': f'output only {link_name}', } elif isinstance(vlans, int): # Untagged link interface_config = { - 'name': 'b%u' % src_port, - 'description': 'untagged %s' % link_name, + 'name': f'b{src_port}', + 'description': f'untagged {link_name}', 'native_vlan': self.vlan_name(vlans) } elif isinstance(vlans, list): # Tagged link interface_config = { - 
'name': 'b%u' % src_port, - 'description': 'tagged %s' % link_name, + 'name': f'b{src_port}', + 'description': f'tagged {link_name}', 'tagged_vlans': [self.vlan_name(vlan) for vlan in vlans] } elif dst_node and dst_port: # Stack link interface_config = { - 'name': 'b%u' % src_port, - 'description': 'stack %s' % link_name, + 'name': f'b{src_port}', + 'description': f'stack {link_name}', 'stack': { 'dp': dst_node, 'port': dst_port @@ -436,11 +435,11 @@ def get_interface_config(link_name, src_port, dst_node, dst_port, vlans, options elif vlans is None: # output only link or coprocessor, leave to more specific options to handle interface_config = { - 'name': 'b%u' % src_port, - 'description': 'output only %s' % link_name, + 'name': f'b{src_port}', + 'description': f'output only {link_name}', } else: - raise GenerationError('Unknown %s link type %s' % (type_, vlans)) + raise GenerationError(f'Unknown {type} link type {vlans}') if options: for option_key, option_value in options.items(): interface_config[option_key] = option_value @@ -460,7 +459,7 @@ def add_dp_config(src_node, dst_node, link_key, link_info, reverse=False): src_port, dst_port = link_info['port2'], link_info['port1'] else: src_port, dst_port = link_info['port1'], link_info['port2'] - link_name = 'link #%s to %s:%s' % (link_key, dst_node, dst_port) + link_name = f'link #{link_key} to {dst_node}:{dst_port}' options = {} dst_id = dst_info['switch_n'] if link_options and (src_id, dst_id) in link_options: @@ -470,7 +469,7 @@ def add_dp_config(src_node, dst_node, link_key, link_info, reverse=False): else: # Generate host-switch config link src_port, dst_port = link_info['port1'], None - link_name = 'link #%s to %s' % (link_key, dst_node) + link_name = f'link #{link_key} to {dst_node}' host_n = dst_info['host_n'] if host_options and host_n in host_options: options = host_options[host_n] @@ -586,4 +585,4 @@ class FaucetFakeOFTopoGenerator(FaucetTopoGenerator): @staticmethod def dp_dpid(i): """DP DPID""" - return 
'%u' % (i + 1) + return f'{i + 1}' diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index 9c6ceab350..15d25e72c0 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ -r docs/requirements.txt flake8==3.9.2 -pylint==2.10.2 +pylint==2.11.1 pytype==2021.9.9 From e6c0d1ce97d61f22777edd122151aea21d43c0ae Mon Sep 17 00:00:00 2001 From: cglewis Date: Thu, 16 Sep 2021 15:06:54 -0700 Subject: [PATCH 044/231] change to f-strings for clib_mininet_tests --- clib/clib_mininet_tests.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/clib/clib_mininet_tests.py b/clib/clib_mininet_tests.py index 1f882f3cc2..608d085eef 100644 --- a/clib/clib_mininet_tests.py +++ b/clib/clib_mininet_tests.py @@ -59,12 +59,12 @@ class FaucetTcpdumpHelperTest(FaucetSimpleTest): def _terminate_with_zero(self, tcpdump_helper): term_returns = tcpdump_helper.terminate() self.assertEqual( - 0, term_returns, msg='terminate code not 0: %d' % term_returns) + 0, term_returns, msg=f'terminate code not 0: {term_returns}') def _terminate_with_nonzero(self, tcpdump_helper): term_returns = tcpdump_helper.terminate() self.assertNotEqual( - 0, term_returns, msg='terminate code is 0: %d' % term_returns) + 0, term_returns, msg=f'terminate code is 0: {term_returns}') def test_tcpdump_execute(self): """Check tcpdump filter monitors ping using execute""" @@ -73,10 +73,10 @@ def test_tcpdump_execute(self): to_host = self.net.hosts[1] tcpdump_filter = ('icmp') tcpdump_helper = TcpdumpHelper(to_host, tcpdump_filter, [ - lambda: from_host.cmd('ping -c1 %s' % to_host.IP())]) + lambda: from_host.cmd(f'ping -c1 {to_host.IP()}')]) tcpdump_txt = tcpdump_helper.execute() self.assertTrue(re.search( - '%s: ICMP echo request' % to_host.IP(), tcpdump_txt)) + f'{to_host.IP()}: ICMP echo request', tcpdump_txt)) self._terminate_with_zero(tcpdump_helper) def test_tcpdump_pcap(self): @@ -88,11 +88,11 @@ def test_tcpdump_pcap(self): pcap_file = 
os.path.join(self.tmpdir, 'out.pcap') tcpdump_helper = TcpdumpHelper( to_host, tcpdump_filter, - [lambda: from_host.cmd('ping -c3 %s' % to_host.IP())], + [lambda: from_host.cmd(f'ping -c3 {to_host.IP()}')], pcap_out=pcap_file, packets=None) tcpdump_helper.execute() self._terminate_with_zero(tcpdump_helper) - result = from_host.cmd('tcpdump -en -r %s' % pcap_file) + result = from_host.cmd(f'tcpdump -en -r {pcap_file}') self.assertEqual(result.count('ICMP echo reply'), 3, 'three icmp echo replies') def test_tcpdump_noblock(self): @@ -103,7 +103,7 @@ def test_tcpdump_noblock(self): tcpdump_filter = ('icmp') tcpdump_helper = TcpdumpHelper( to_host, tcpdump_filter, - [lambda: from_host.cmd('ping -c10 %s' % to_host.IP())], + [lambda: from_host.cmd(f'ping -c10 {to_host.IP()}')], blocking=False, packets=None) count = 0 while tcpdump_helper.next_line(): @@ -118,14 +118,14 @@ def test_tcpdump_nextline(self): to_host = self.net.hosts[1] tcpdump_filter = ('icmp') tcpdump_helper = TcpdumpHelper(to_host, tcpdump_filter, [ - lambda: from_host.cmd('ping -c5 -i2 %s' % to_host.IP())]) + lambda: from_host.cmd(f'ping -c5 -i2 {to_host.IP()}')]) self.assertTrue(re.search('proto ICMP', tcpdump_helper.next_line())) next_line = tcpdump_helper.next_line() - self.assertTrue(re.search('%s: ICMP echo request' % to_host.IP(), next_line), next_line) + self.assertTrue(re.search(f'{to_host.IP()}: ICMP echo request', next_line), next_line) self.assertTrue(re.search('proto ICMP', tcpdump_helper.next_line())) next_line = tcpdump_helper.next_line() - self.assertTrue(re.search('%s: ICMP echo reply' % from_host.IP(), next_line), next_line) + self.assertTrue(re.search(f'{from_host.IP()}: ICMP echo reply', next_line), next_line) self.assertFalse(re.search('ICMP', tcpdump_helper.next_line())) while tcpdump_helper.next_line(): pass @@ -154,7 +154,7 @@ def test_containers(self): self.assertTrue( count == self.N_EXTENDED, - 'Found %d containers, expected %d' % (count, self.N_EXTENDED)) + f'Found {count} 
containers, expected {self.N_EXTENDED}') self.assertTrue( os.path.exists( From 6690f8341b44753298632c635d718adcf2561a81 Mon Sep 17 00:00:00 2001 From: cglewis Date: Thu, 16 Sep 2021 15:27:24 -0700 Subject: [PATCH 045/231] change to f-strings for clib docker_host --- clib/docker_host.py | 42 ++++++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/clib/docker_host.py b/clib/docker_host.py index dfa9abf01e..4d400155db 100644 --- a/clib/docker_host.py +++ b/clib/docker_host.py @@ -73,7 +73,7 @@ def __init__(self, name, image=None, tmpdir=None, prefix=None, env_vars=None, vo def pullImage(self): # pylint: disable=invalid-name "Pull docker image if necessary" if self.image not in quietRun('docker images'): - error('%s: docker image' % self.name, self.image, + error(f'{self.name}: docker image', self.image, 'not available locally - pulling\n') _out, err, code = errRun('docker', 'pull', self.image) if err or code: @@ -88,9 +88,9 @@ def startShell(self, mnopts=None): assert mnopts is None, 'mnopts not supported for DockerHost' - self.container = '%s-%s' % (self.prefix, self.name) + self.container = f'{self.prefix}-{self.name}' - debug('Starting container %s with image "%s".' 
% (self.container, self.image)) + debug(f'Starting container {self.container} with image "{self.image}".') self.kill(purge=True) @@ -99,14 +99,14 @@ def startShell(self, mnopts=None): base_cmd = ["docker", "run", "-ti", "--privileged", "--entrypoint", "env", "-h", self.name, "--name", self.container] - opt_args = ['--net=%s' % self.network] - env_vars = self.env_vars + ["TERM=dumb", "PS1=%s" % self.ps1] + opt_args = [f'--net={self.network}'] + env_vars = self.env_vars + [f"TERM=dumb", "PS1={self.ps1}"] env_args = reduce(operator.add, (['--env', var] for var in env_vars), []) vol_args = reduce(operator.add, (['-v', var] for var in self.vol_maps), ['-v', tmp_volume]) image_args = [self.image, "bash", "--norc", "-is", "mininet:" + self.name] cmd = base_cmd + opt_args + env_args + vol_args + image_args self.master, self.slave = pty.openpty() - debug('docker command "%s", fd %d, fd %d' % (' '.join(cmd), self.master, self.slave)) + debug(f'docker command {" ".join(cmd)}, fd {self.master}, fd {self.slave}') try: self.shell = self._popen(cmd, stdin=self.slave, stdout=self.slave, stderr=self.slave) self.stdin = os.fdopen(self.master, 'r') @@ -130,21 +130,21 @@ def startShell(self, mnopts=None): self.readbuf = '' self.waiting = False except Exception: - error('docker cmd: %s' % ' '.join(cmd)) + error(f'docker cmd: {" ".join(cmd)}') if self.shell.returncode: - error('returncode: %d' % self.shell.returncode) + error(f'returncode: {self.shell.returncode}') if self.shell: self.shell.poll() raise self.pid = self.inspect_pid() - debug("Container %s created pid %s/%s." % (self.container, self.pid, self.shell.pid)) + debug(f"Container {self.container} created pid {self.pid}/{self.shell.pid}.") self.cmd('unset HISTFILE; stty -echo; set +m') # pylint: disable=no-member def kill(self, purge=False): """Kill a container.""" - debug('killing container %s.' 
% self.container) + debug(f'killing container {self.container}.') if purge: kill_cmd = ["docker", "rm", "-f", self.container] else: @@ -180,21 +180,21 @@ def open_log(self): def activate(self): """Activate a container and return STDOUT to it.""" - assert not self.active_pipe, 'container %s already activated' % self.container - debug('activating container %s.' % self.container) + assert not self.active_pipe, f'container {self.container} already activated' + debug(f'activating container {self.container}.') inspect_cmd = ["docker", "inspect", "--format={{json .Config}}", self.image] inspect_pipe = None try: inspect_pipe = self._popen(inspect_cmd, stdin=DEVNULL, stdout=PIPE, stderr=STDOUT) config_json = inspect_pipe.stdout.readlines() inspect_pipe.stdout.close() - assert len(config_json) == 1, "Expected 1 config line, found %s" % len(config_json) + assert len(config_json) == 1, f"Expected 1 config line, found {len(config_json)}" config = json.loads(config_json[0].decode()) entryconfig = config['Entrypoint'] entrypoint = entryconfig if entryconfig else ['/usr/bin/env'] cmd = config['Cmd'] if 'Cmd' in config else [] docker_cmd = entrypoint + (cmd if cmd else []) - debug('logging to activate.log for %s' % docker_cmd) + debug(f'logging to activate.log for {docker_cmd}') stdout = self.open_log() self.active_log = stdout except Exception: @@ -205,8 +205,7 @@ def activate(self): self.active_pipe = self.popen(docker_cmd, stdin=DEVNULL, stdout=stdout, stderr=STDOUT) pipe_out = self.active_pipe.stdout out_fd = pipe_out.fileno() if pipe_out else None - debug('Active_pipe container %s pid %s fd %s' % - (self.container, self.active_pipe.pid, out_fd)) + debug(f'Active_pipe container {self.container} pid {self.active_pipe.pid} fd {out_fd}') return self.active_pipe def wait(self): @@ -214,14 +213,14 @@ def wait(self): try: if self.active_pipe_returncode is not None: return self.active_pipe_returncode - debug('Waiting for container %s.' 
% self.container) + debug(f'Waiting for container {self.container}.') assert self.active_pipe, "container not activated" self.active_pipe.communicate() self.active_pipe.returncode = self.active_pipe.wait() self.terminate() return self.active_pipe_returncode except Exception as err: - error('Exception waiting for %s: %s' % (self.container, err)) + error(f'Exception waiting for {self.container}: {err}') self.terminate() raise @@ -236,8 +235,7 @@ def read(self, size=1024): def terminate(self): """Override Mininet terminate() to partially avoid pty leak.""" - debug('Terminating container %s, shell %s, pipe %s' % ( - self.container, self.shell, self.active_pipe)) + debug(f'Terminating container {self.container}, shell {self.shell}, pipe {self.active_pipe}') if self.slave: os.close(self.slave) self.slave = None @@ -274,7 +272,7 @@ def popen(self, *args, **kwargs): mncmd = ['docker', 'exec', '--env', 'TERM=dumb', '-t', self.container] pipe = Host.popen(self, mncmd=mncmd, *args, **kwargs) if pipe: - debug('docker pid %d: %s %s %s' % (pipe.pid, mncmd, args, kwargs)) + debug(f'docker pid {pipe.pid}: {mncmd} {args} {kwargs}') return pipe def _popen(self, cmd, **params): @@ -288,7 +286,7 @@ def _popen(self, cmd, **params): if pipe: stdout = pipe.stdout out_fd = pipe.stdout.fileno() if stdout else None - debug('docker pid %d: %s, fd %s' % (pipe.pid, cmd, out_fd)) + debug(f'docker pid {pipe.pid}: {cmd}, fd {out_fd}') return pipe From 4a6b91e1a4bc343af9c9e78f666f46c2c1463cda Mon Sep 17 00:00:00 2001 From: cglewis Date: Thu, 16 Sep 2021 15:45:28 -0700 Subject: [PATCH 046/231] move to f-strings for clib mininet test main --- clib/clib_mininet_test_main.py | 72 +++++++++++++++------------------- 1 file changed, 31 insertions(+), 41 deletions(-) diff --git a/clib/clib_mininet_test_main.py b/clib/clib_mininet_test_main.py index 615e23c18c..2e72065b96 100755 --- a/clib/clib_mininet_test_main.py +++ b/clib/clib_mininet_test_main.py @@ -125,24 +125,24 @@ def import_hw_config(): if 
os.path.isfile(config_file_name): break if os.path.isfile(config_file_name): - print('Using config from %s' % config_file_name) + print(f'Using config from {config_file_name}') else: - print('Cannot find %s in %s' % (HW_SWITCH_CONFIG_FILE, CONFIG_FILE_DIRS)) + print(f'Cannot find {HW_SWITCH_CONFIG_FILE} in {CONFIG_FILE_DIRS}') sys.exit(-1) try: with open(config_file_name, 'r', encoding='utf-8') as config_file: config = yaml.safe_load(config_file) except IOError: - print('Could not load YAML config data from %s' % config_file_name) + print(f'Could not load YAML config data from {config_file_name}') sys.exit(-1) if config.get('hw_switch', False): unknown_keys = set(config.keys()) - set(ALL_HW_CONFIG.keys()) if unknown_keys: - print('unknown config %s in %s' % (unknown_keys, config_file_name)) + print(f'unknown config {unknown_keys} in {config_file_name}') sys.exit(-1) missing_required_keys = set(REQUIRED_HW_CONFIG.keys()) - set(config.keys()) if missing_required_keys: - print('missing required config: %s' % missing_required_keys) + print(f'missing required config: {missing_required_keys}') sys.exit(-1) for config_key, config_value in config.items(): valid_types = ALL_HW_CONFIG[config_key] @@ -150,15 +150,12 @@ def import_hw_config(): config_value for valid_type in valid_types if isinstance(config_value, valid_type)] if not valid_values: - print('%s (%s) must be of type %s in %s' % ( - config_key, config_value, - valid_types, config_file_name)) + print(f'{config_key} ({config_value}) must be of type {valid_types} in {config_file_name}') sys.exit(-1) dp_ports = config['dp_ports'] if len(dp_ports) < REQUIRED_TEST_PORTS: - print('At least %u dataplane ports are required, ' - '%d are provided in %s.' 
% - (REQUIRED_TEST_PORTS, len(dp_ports), config_file_name)) + print(f'At least {REQUIRED_TEST_PORTS} dataplane ports are required, ' + f'{len(dp_ports)} are provided in {config_file_name}.') sys.exit(-1) return config return None @@ -170,8 +167,7 @@ def check_dependencies(): for (binary, binary_get_version, binary_present_re, binary_version_re, binary_minversion) in EXTERNAL_DEPENDENCIES: binary_args = [binary] + binary_get_version - required_binary = 'required binary/library %s' % ( - ' '.join(binary_args)) + required_binary = f'required binary/library {" ".join(binary_args)}') try: with subprocess.Popen( binary_args, @@ -187,31 +183,28 @@ def check_dependencies(): # Might have run successfully, need to parse output pass except OSError: - print('could not run %s' % required_binary) + print(f'could not run {required_binary}') return False present_match = re.search(binary_present_re, binary_output) if not present_match: - print('%s not present or did not return expected string %s (%s)' % ( - required_binary, binary_present_re, binary_output)) + print(f'{required_binary} not present or did not return expected string ' + f'{binary_present_re} ({binary_output})') return False if binary_version_re: version_match = re.search(binary_version_re, binary_output) if version_match is None: - print('could not get version from %s (%s)' % ( - required_binary, binary_output)) + print(f'could not get version from {required_binary} ({binary_output})') return False try: binary_version = version_match.group(1) except ValueError: - print('cannot parse version %s for %s' % ( - version_match, required_binary)) + print(f'cannot parse version {version_match} for {required_binary}') return False if binary == 'fuser' and binary_version == 'UNKNOWN': # Workaround for psmisc 23.3 return True if version.parse(binary_version) < version.parse(binary_minversion): - print('%s version %s is less than required version %s' % ( - required_binary, binary_version, binary_minversion)) + 
print(f'{required_binary} version {binary_version} is less than required version {binary_minversion}') return False return True @@ -334,16 +327,13 @@ def parse_flow(flow_lines): parse_flow(flow_lines) for table in sorted(table_matches): - print('table: %u' % table) - print(' matches: %s (max %u)' % ( - sorted(table_matches[table]), table_matches_max[table])) - print(' table_instructions: %s (max %u)' % ( - sorted(table_instructions[table]), table_instructions_max[table])) - print(' table_actions: %s (max %u)' % ( - sorted(table_actions[table]), table_actions_max[table])) + print(f'table: {table}') + print(f' matches: {sorted(table_matches[table])} (max {table_matches_max[table]})') + print(f' table_instructions: {sorted(table_instructions[table])} (max {table_instructions_max[table]})') + print(f' table_actions: {sorted(table_actions[table])} (max {table_actions_max[table]})') if group_actions: print('group bucket actions:') - print(' %s' % sorted(group_actions)) + print(f' {sorted(group_actions)}') def filter_test_hardware(test_obj, hw_config): @@ -399,7 +389,7 @@ def expand_tests(modules, requested_test_classes, regex_test_classes, excluded_t if test_name.endswith('Test') and test_name.startswith('Faucet'): if not filter_test_hardware(test_obj, hw_config): continue - print('adding test %s' % test_name) + print(f'adding test {test_name}') test_suite = make_suite( test_obj, hw_config, root_tmpdir, ports_sock, max_loadavg(), port_order, start_port) @@ -420,7 +410,7 @@ def expand_tests(modules, requested_test_classes, regex_test_classes, excluded_t parallel_test_suites = [] if parallel_test_suites: seed = time.time() - print('seeding parallel test shuffle with %f' % seed) + print(f'seeding parallel test shuffle with {seed}') random.seed(seed) random.shuffle(parallel_test_suites) for test_suite in parallel_test_suites: @@ -488,7 +478,7 @@ def run_parallel_test_suites(root_tmpdir, resultclass, parallel_tests): results = [] if parallel_tests.countTestCases(): 
max_parallel_tests = min(parallel_tests.countTestCases(), max_loadavg()) - print('running maximum of %u parallel tests' % max_parallel_tests) + print(f'running maximum of {max_parallel_tests} parallel tests') parallel_runner = test_runner(root_tmpdir, resultclass) parallel_suite = ConcurrentTestSuite( parallel_tests, fork_for_tests(max_parallel_tests)) @@ -564,8 +554,8 @@ def report_results(results, hw_config, report_json_filename): def run_test_suites(debug, report_json_filename, hw_config, root_tmpdir, resultclass, single_tests, parallel_tests, sanity_result): - print('running %u tests in parallel and %u tests serial' % ( - parallel_tests.countTestCases(), single_tests.countTestCases())) + print(f'running {parallel_tests.countTestCases()} tests in parallel and ' + f'{single_tests.countTestCases()} tests serial') results = [] results.append(sanity_result) results.extend(run_parallel_test_suites(root_tmpdir, resultclass, parallel_tests)) @@ -588,7 +578,7 @@ def start_port_server(root_tmpdir, start_free_ports, min_free_ports): break time.sleep(1) if not os.path.exists(ports_sock): - print('ports server did not start (%s not created)' % ports_sock) + print(f'ports server did not start ({ports_sock} not created)') sys.exit(-1) return ports_sock @@ -637,7 +627,7 @@ def clean_test_dirs(root_tmpdir, all_successful, sanity, keep_logs, dumpfail): if not keep_logs or not os.listdir(root_tmpdir): shutil.rmtree(root_tmpdir) else: - print('\nlog/debug files for failed tests are in %s\n' % root_tmpdir) + print(f'\nlog/debug files for failed tests are in {root_tmpdir}\n') if not keep_logs: if sanity: test_dirs = glob.glob(os.path.join(root_tmpdir, '*')) @@ -657,7 +647,7 @@ def run_tests(modules, hw_config, requested_test_classes, regex_test_classes, du print('Testing hardware, forcing test serialization') serial = True root_tmpdir = tempfile.mkdtemp(prefix='faucet-tests-', dir='/var/tmp') - print('Logging test results in %s' % root_tmpdir) + print(f'Logging test results in 
{root_tmpdir}') start_free_ports = 10 min_free_ports = 200 if serial: @@ -778,7 +768,7 @@ def parse_args(): regex_test_classes = None if args.regex: regex_test_classes = re.compile(args.regex) - print('Running tests on classes matching %s' % args.regex) + print(f'Running tests on classes matching {args.regex}') except(KeyError, IndexError, ValueError): parser.print_usage() sys.exit(-1) @@ -798,7 +788,7 @@ def parse_args(): def test_main(modules): """Test main.""" - print('testing module %s' % modules) + print(f'testing module {modules}') (requested_test_classes, regex_test_classes, clean, dumpfail, debug, keep_logs, nocheck, serial, repeat, excluded_test_classes, report_json_filename, port_order, @@ -821,7 +811,7 @@ def test_main(modules): sys.exit(-1) print('port order: -o', ','.join(str(i) for i in port_order)) - print('start port: --port %s' % start_port) + print(f'start port: --port {start_port}') hw_config = import_hw_config() From 899a65f81e9b6710aa5ba12ed6a4f29d97509a0d Mon Sep 17 00:00:00 2001 From: cglewis Date: Thu, 16 Sep 2021 15:48:22 -0700 Subject: [PATCH 047/231] stray ) --- clib/clib_mininet_test_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/clib_mininet_test_main.py b/clib/clib_mininet_test_main.py index 2e72065b96..7aa588e297 100755 --- a/clib/clib_mininet_test_main.py +++ b/clib/clib_mininet_test_main.py @@ -167,7 +167,7 @@ def check_dependencies(): for (binary, binary_get_version, binary_present_re, binary_version_re, binary_minversion) in EXTERNAL_DEPENDENCIES: binary_args = [binary] + binary_get_version - required_binary = f'required binary/library {" ".join(binary_args)}') + required_binary = f'required binary/library {" ".join(binary_args)}' try: with subprocess.Popen( binary_args, From e5c126abd386c4386cdeb6cb2b24c736652ad4a5 Mon Sep 17 00:00:00 2001 From: cglewis Date: Thu, 16 Sep 2021 16:23:46 -0700 Subject: [PATCH 048/231] use f-strings for clib fakeoftable --- clib/fakeoftable.py | 64 
++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 36 deletions(-) diff --git a/clib/fakeoftable.py b/clib/fakeoftable.py index cae3edd17c..a8444310cd 100644 --- a/clib/fakeoftable.py +++ b/clib/fakeoftable.py @@ -189,7 +189,7 @@ def is_output(self, match, src_dpid, dst_dpid, port=None, vid=None, trace=False) if not found: # Packet not reached destination, so continue traversing if trace: - sys.stderr.write('FakeOFTable %s: %s\n' % (dp_id, pkt)) + sys.stderr.write(f'FakeOFTable {dp_id}: {pkt}\n') port_outputs = self.tables[dp_id].get_port_outputs(pkt, trace=trace) valve = self.valves_manager.valves[dp_id] for out_port, out_pkts in port_outputs.items(): @@ -219,7 +219,7 @@ def is_output(self, match, src_dpid, dst_dpid, port=None, vid=None, trace=False) elif trace: # Output to non-stack port, can ignore this output sys.stderr.write( - 'Ignoring non-stack output %s:%s\n' % (valve.dp.name, out_port)) + f'Ignoring non-stack output {valve.dp.name}:{out_port}\n') if trace: sys.stderr.write('\n') return found @@ -270,13 +270,13 @@ def _del(_ofmsg, group_id): def _add(ofmsg, group_id): if group_id in self.groups: raise FakeOFTableException( - 'group already in group table: %s' % ofmsg) + f'group already in group table: {ofmsg}') self.groups[group_id] = ofmsg def _modify(ofmsg, group_id): if group_id not in self.groups: raise FakeOFTableException( - 'group not in group table: %s' % ofmsg) + f'group not in group table: {ofmsg}') self.groups[group_id] = ofmsg _groupmod_handlers = { @@ -298,15 +298,12 @@ def _validate_flowmod_tfm(table_id, tfm_body, ofmsg): if table_id == ofp.OFPTT_ALL: if ofmsg.match.items() and not self.tfm: raise FakeOFTableException( - 'got %s with matches before TFM that defines tables' - % ofmsg) + f'got {ofmsg} with matches before TFM that defines tables') return if tfm_body is None: raise FakeOFTableException( - 'got %s before TFM that defines table %u' % ( - ofmsg, table_id - ) + f'got {ofmsg} before TFM that defines table 
{table_id}' ) def _add(table, flowmod): @@ -333,9 +330,7 @@ def _add(table, flowmod): table.remove(fte) break if flowmod.overlaps(fte): - raise FakeOFTableException( - 'Overlapping flowmods {} and {}'.format( - flowmod, fte)) + raise FakeOFTableException(f'Overlapping flowmods {flowmod} and {fte}') table.append(flowmod) def _del(table, flowmod): @@ -386,8 +381,7 @@ def _modify_strict(table, flowmod): for table in tables: entries = len(table) if entries > tfm_body.max_entries: - tfm_table_details = '%s : table %u %s full (%u/%u)' % ( - self.dp_id, table_id, tfm_body.name, entries, tfm_body.max_entries) + tfm_table_details = f'self.dp_id: table {table_id} {tfm_body.name} full ({entries}/{tfm_body.max_entries})' flow_dump = '\n\n'.join( (tfm_table_details, str(ofmsg), str(tfm_body))) raise FakeOFTableException(flow_dump) @@ -464,7 +458,7 @@ def single_table_lookup(self, match, table_id, trace=False): matching_fte = fte break if trace: - sys.stderr.write('%s: %s\n' % (table_id, matching_fte)) + sys.stderr.write(f'{table_id}: {matching_fte}\n') return matching_fte def _process_instruction(self, match, instruction): @@ -515,7 +509,7 @@ def _process_instruction(self, match, instruction): elif action.type == ofp.OFPAT_GROUP: # Group mod so make sure that we process the group buckets if action.group_id not in self.groups: - raise FakeOFTableException('output group not in group table: %s' % action) + raise FakeOFTableException(f'output group not in group table: {action}') buckets = self.groups[action.group_id].buckets for bucket in buckets: bucket_outputs, _, _ = self._process_instruction(packet_dict, bucket) @@ -568,8 +562,8 @@ def get_table_output(self, match, table_id, trace=False): if next_table: pending_actions = [] if pending_actions: - raise FakeOFTableException('flow performs actions on packet after \ - output with no goto: %s' % matching_fte) + raise FakeOFTableException(f'flow performs actions on packet after \ + output with no goto: {matching_fte}') return 
outputs, packet_dict, next_table def get_output(self, match, trace=False): @@ -699,7 +693,7 @@ def lookup(self, match, trace=False): # if a flowmod is found, make modifications to the match values and # determine if another lookup is necessary if trace: - sys.stderr.write('%d: %s\n' % (table_id, matching_fte)) + sys.stderr.write(f'{table_id}: {matching_fte}\n') if matching_fte: for instruction in matching_fte.instructions: instructions.append(instruction) @@ -772,7 +766,7 @@ def _process_vid_stack(action, vid_stack): if trace: sys.stderr.write( - 'tracing packet flow %s matching to port %s, vid %s\n' % (match, port, vid)) + f'tracing packet flow {match} matching to port {port}, vid {vid}\n') # vid_stack represents the packet's vlan stack, innermost label listed # first @@ -797,7 +791,7 @@ def _process_vid_stack(action, vid_stack): elif action.type == ofp.OFPAT_GROUP: if action.group_id not in self.groups: raise FakeOFTableException( - 'output group not in group table: %s' % action) + f'output group not in group table: {action}') buckets = self.groups[action.group_id].buckets for bucket in buckets: bucket_vid_stack = vid_stack @@ -831,7 +825,7 @@ def apply_instructions_to_packet(self, match): def __str__(self): string = '' for table_id, table in enumerate(self.tables): - string += '\n----- Table %u -----\n' % (table_id) + string += f'\n----- Table {table_id} -----\n' string += '\n'.join(sorted([str(flowmod) for flowmod in table])) return string @@ -883,8 +877,7 @@ def validate_instructions(self): for instruction in self.instructions: if instruction.type in instruction_types: raise FakeOFTableException( - 'FlowMod with Multiple instructions of the ' - 'same type: {}'.format(self.instructions)) + f'FlowMod with Multiple instructions of the same type: {self.instructions}') instruction_types.add(instruction.type) def out_port_matches(self, other): @@ -1050,7 +1043,7 @@ def _pretty_field_str(self, key, value, mask=None): if mask is not None and mask != -1: mask_str = 
str(mask) if mask_str: - result += "/{}".format(mask_str) + result += f"/{mask_str}" return result def _pretty_action_str(self, action): @@ -1069,7 +1062,7 @@ def _pretty_action_str(self, action): else: value = str(action.port) elif isinstance(action, parser.OFPActionSetField): - name = 'set_{}'.format(action.key) + name = f'set_{action.key}' value = self._pretty_field_str(action.key, action.value) else: name, attr = actions_names_attrs[type(action).__name__] @@ -1077,40 +1070,39 @@ def _pretty_action_str(self, action): value = getattr(action, attr) result = name if value: - result += " {}".format(value) + result += f" {value}" return result def __str__(self): - result = 'Priority: {0} | Match: '.format(self.priority) + result = f'Priority: {self.priority} | Match: ' for key in sorted(self.match_values.keys()): val = self.match_values[key] mask = self.match_masks[key] - result += " {} {},".format( - key, self._pretty_field_str(key, val, mask)) + result += f" {key} {self._pretty_field_str(key, val, mask)}," result = result.rstrip(',') result += " | Instructions :" if not self.instructions: result += ' drop' for instruction in self.instructions: if isinstance(instruction, parser.OFPInstructionGotoTable): - result += ' goto {}'.format(instruction.table_id) + result += f' goto {instruction.table_id}' elif isinstance(instruction, parser.OFPInstructionActions): for action in instruction.actions: - result += " {},".format(self._pretty_action_str(action)) + result += f" {self._pretty_action_str(action)}," else: result += str(instruction) result = result.rstrip(',') return result def __repr__(self): - string = 'priority: {0} cookie: {1}'.format(self.priority, self.cookie) + string = f'priority: {self.priority} cookie: {self.cookie}' for key in sorted(self.match_values.keys()): mask = self.match_masks[key] - string += ' {0}: {1}'.format(key, self.match_values[key]) + string += f' {key}: {self.match_values[key]}' if mask.int != -1: # pytype: disable=attribute-error - string 
+= '/{0}'.format(mask) - string += ' Instructions: {0}'.format(str(self.instructions)) + string += f'/{mask}' + string += f' Instructions: {str(self.instructions)}' return string From a29886958b456325275ef7e60f276c5e494b4c94 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 08:53:55 -0700 Subject: [PATCH 049/231] remove python3.5; more pylinting with f-strings --- .github/workflows/tests-unit.yml | 8 +- clib/mininet_test_topo.py | 134 +++++++++++++++---------------- debian/control | 4 +- faucet/__main__.py | 4 +- setup.cfg | 3 +- setup.py | 8 +- 6 files changed, 77 insertions(+), 84 deletions(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 21ee8be6ce..995693811f 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.5, 3.6, 3.7, 3.8, 3.9] + python-version: [3.6, 3.7, 3.8, 3.9] steps: - name: Checkout repo uses: actions/checkout@v2 @@ -55,10 +55,6 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - - if: matrix.python-version == 3.5 - name: Upgrade setuptools for python-3.5 - run: | - pip3 install --upgrade setuptools - name: Install dependencies run: | ./docker/pip_deps.sh @@ -70,7 +66,7 @@ jobs: - if: ${{ matrix.python-version == env.CODECOV_PY_VER }} name: Upload codecov uses: codecov/codecov-action@v2 - - if: ${{ matrix.python-version != 3.5 && (env.FILES_CHANGED == 'all' || env.RQ_FILES_CHANGED || env.PY_FILES_CHANGED) }} + - if: ${{ env.FILES_CHANGED == 'all' || env.RQ_FILES_CHANGED || env.PY_FILES_CHANGED }} name: Run pytype run: | ./docker/pip_deps.sh --extra-requirements="codecheck-requirements.txt" diff --git a/clib/mininet_test_topo.py b/clib/mininet_test_topo.py index 6aaabadec2..28814ccdca 100644 --- a/clib/mininet_test_topo.py +++ b/clib/mininet_test_topo.py @@ -70,22 +70,22 @@ def create_dnsmasq(self, tmpdir, iprange, router, vlan, 
interface=None): """Start dnsmasq instance inside dnsmasq namespace""" if interface is None: interface = self.defaultIntf() - dhcp_leasefile = os.path.join(tmpdir, 'nfv-dhcp-%s-%s-vlan%u.leases' % (self.name, iprange, vlan)) - log_facility = os.path.join(tmpdir, 'nfv-dhcp-%s-%s-vlan%u.log' % (self.name, iprange, vlan)) - pid_file = os.path.join(tmpdir, 'dnsmasq-%s-%s-vlan%u.pid' % (self.name, iprange, vlan)) + dhcp_leasefile = os.path.join(tmpdir, f'nfv-dhcp-{self.name}-{iprange}-vlan{vlan}.leases') + log_facility = os.path.join(tmpdir, f'nfv-dhcp-{self.name}-{iprange}-vlan{vlan}.log') + pid_file = os.path.join(tmpdir, f'dnsmasq-{self.name}-{iprange}-vlan{vlan}.pid') self.pid_files.append(pid_file) cmd = 'dnsmasq' opts = '' - opts += ' --dhcp-range=%s,255.255.255.0' % iprange + opts += f' --dhcp-range={iprange},255.255.255.0' opts += ' --dhcp-sequential-ip' - opts += ' --dhcp-option=option:router,%s' % router + opts += f' --dhcp-option=option:router,{router}' opts += ' --no-resolv --txt-record=does.it.work,yes' opts += ' --bind-interfaces' opts += ' --except-interface=lo' - opts += ' --interface=%s' % interface - opts += ' --dhcp-leasefile=%s' % dhcp_leasefile - opts += ' --log-facility=%s' % log_facility - opts += ' --pid-file=%s' % pid_file + opts += f' --interface={interface}' + opts += f' --dhcp-leasefile={dhcp_leasefile}' + opts += f' --log-facility={log_facility}' + opts += f' --pid-file={pid_file}' opts += ' --conf-file=' return self.cmd(cmd + opts) @@ -97,9 +97,9 @@ def run_dhclient(self, tmpdir, interface=None, timeout=10): opts = '' opts += ' -1' opts += ' -d' - opts += ' -pf %s/dhclient-%s.pid' % (tmpdir, self.name) - opts += ' -lf %s/dhclient-%s.leases' % (tmpdir, self.name) - opts += ' %s' % interface + opts += f' -pf {tmpdir}/dhclient-{self.name}.pid' + opts += f' -lf {tmpdir}/dhclient-{self.name}.leases' + opts += f' {interface}' dhclient_cmd = cmd + opts return self.cmd(mininet_test_util.timeout_cmd(dhclient_cmd, timeout), verbose=True) @@ -136,40 
+136,39 @@ def config(self, vlans=None, **params): # pylint: disable=arguments-differ for vlan_id, ip_addr in vlan_intfs.items(): if isinstance(vlan_id, tuple): # Interface will take multiply VLAN tagged packets - intf_name = '%s' % intf.name + intf_name = f'{intf.name}' for vlan_i in vlan_id: prev_name = intf_name # Cannot have intf name tu0xy-eth0.VID1.VID2 as that takes up too many bytes - intf_name += '.%s' % vlan_i + intf_name += f'.{vlan_i}' cmds.extend([ - 'ip link add link %s name %s type vlan id %s' % - (prev_name, intf_name, vlans[vlan_i]), - 'ip link set dev %s up' % (intf_name) + f'ip link add link {prev_name} name {intf_name} type vlan id {vlans[vlan_i]}', + f'ip link set dev {intf_name} up' ]) self.nameToIntf[intf_name] = intf self.vlan_intfs.setdefault(vlan_id, []) self.vlan_intfs[vlan_id].append(intf_name) - cmds.append('ip -4 addr add %s dev %s' % (ip_addr, intf_name)) + cmds.append(f'ip -4 addr add {ip_addr} dev {intf_name}') else: - intf_name = '%s.%s' % (intf, vlans[vlan_id]) + intf_name = f'{intf}.{vlans[vlan_id]}' cmds.extend([ - 'vconfig add %s %d' % (intf.name, vlans[vlan_id]), - 'ip -4 addr add %s dev %s' % (ip_addr, intf_name), - 'ip link set dev %s up' % intf_name]) + f'vconfig add {intf.name} {vlans[vlan_id]}', + f'ip -4 addr add {ip_addr} dev {intf_name}', + f'ip link set dev {intf_name} up']) self.nameToIntf[intf_name] = intf self.vlan_intfs[vlan_id] = intf_name else: - vlan_intf_name = '%s.%s' % (intf, '.'.join(str(v) for v in vlans)) + vlan_intf_name = f'{intf}.{".".join(str(v) for v in vlans)}' cmds.extend([ - 'ip link set dev %s up' % vlan_intf_name, - 'ip -4 addr add %s dev %s' % (params['ip'], vlan_intf_name)]) + f'ip link set dev {vlan_intf_name} up', + f'ip -4 addr add {params["ip"]} dev {vlan_intf_name}']) for vlan in vlans: - cmds.append('vconfig add %s %d' % (intf, vlan)) + cmds.append(f'vconfig add {intf} {vlan}') intf.name = vlan_intf_name self.nameToIntf[vlan_intf_name] = intf cmds.extend([ - 'ip -4 addr flush dev %s' % 
intf, - 'ip -6 addr flush dev %s' % intf]) + f'ip -4 addr flush dev {intf}', + f'ip -6 addr flush dev {intf}']) for cmd in cmds: self.cmd(cmd) return super_config @@ -210,7 +209,7 @@ def cmd(self, *args, success=0, **kwargs): cmd_output = super().cmd(*args, **kwargs) exit_code = int(super().cmd('echo $?')) if success is not None and exit_code != success: - msg = "%s exited with (%d):'%s'" % (args, exit_code, cmd_output) + msg = f"{args} exited with ({exit_code}):'{cmd_output}'" if self._workaround(args): warn('Ignoring:', msg, '\n') else: @@ -222,27 +221,27 @@ def attach(self, intf): super().attach(intf) # This should be done in Mininet, but we do it for now port = self.ports[intf] - self.cmd('ovs-vsctl set Interface', intf, 'ofport_request=%s' % port) + self.cmd('ovs-vsctl set Interface', intf, f'ofport_request={port}') def add_controller(self, controller): self.clist.append(( - self.name + controller.name, '%s:%s:%d' % ( - controller.protocol, controller.IP(), controller.port))) + self.name + controller.name, + f'{controller.protocol}:{controller.IP()}:{controller.port}')) if self.listenPort: self.clist.append((self.name + '-listen', - 'ptcp:%s' % self.listenPort)) + f'ptcp:{self.listenPort}')) ccmd = '-- --id=@%s create Controller target=\\"%s\\"' if self.reconnectms: - ccmd += ' max_backoff=%d' % self.reconnectms + ccmd += f' max_backoff={self.reconnectms}' for param, value in self.controller_params.items(): - ccmd += ' %s=%s' % (param, value) + ccmd += f' {param}={value}' cargs = ' '.join(ccmd % (name, target) for name, target in self.clist) # Controller ID list cids = ','.join('@%s' % name for name, _target in self.clist) # One ovs-vsctl command to rule them all! 
self.vsctl(cargs - + ' -- set bridge %s controller=[%s]' % (self, cids)) + + f' -- set bridge {self} controller=[{cids}]') def start(self, controllers): # Transcluded from Mininet source, since need to insert @@ -250,39 +249,38 @@ def start(self, controllers): int(self.dpid, 16) # DPID must be a hex string switch_intfs = [intf for intf in self.intfList() if self.ports[intf] and not intf.IP()] # Command to add interfaces - intfs = ' '.join(' -- add-port %s %s' % (self, intf) + intfs = ' '.join(f' -- add-port {self} {intf}' + self.intfOpts(intf) for intf in switch_intfs) # Command to create controller entries - self.clist = [(self.name + c.name, '%s:%s:%d' % - (c.protocol, c.IP(), c.port)) + self.clist = [(self.name + c.name, f'{c.protocol}:{c.IP()}:{c.port}') for c in controllers] if self.listenPort: self.clist.append((self.name + '-listen', - 'ptcp:%s' % self.listenPort)) + f'ptcp:{self.listenPort}')) ccmd = '-- --id=@%s create Controller target=\\"%s\\"' if self.reconnectms: - ccmd += ' max_backoff=%d' % self.reconnectms + ccmd += f' max_backoff={self.reconnectms}' for param, value in self.controller_params.items(): - ccmd += ' %s=%s' % (param, value) + ccmd += f' {param}={value}' cargs = ' '.join(ccmd % (name, target) for name, target in self.clist) # Controller ID list cids = ','.join('@%s' % name for name, _target in self.clist) # Try to delete any existing bridges with the same name if not self.isOldOVS(): - cargs += ' -- --if-exists del-br %s' % self + cargs += f' -- --if-exists del-br {self}' # One ovs-vsctl command to rule them all! self.vsctl(cargs - + ' -- add-br %s' % self - + ' -- set bridge %s controller=[%s]' % (self, cids) + + f' -- add-br {self}' + + f' -- set bridge {self} controller=[{cids}]' + self.bridgeOpts() + intfs) # switch interfaces on mininet host, must have no IP config. 
for intf in switch_intfs: for ipv in (4, 6): - self.cmd('ip -%u addr flush dev %s' % (ipv, intf)) - assert self.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % intf) == '' + self.cmd(f'ip -{ipv} addr flush dev {intf}') + assert self.cmd(f'echo 1 > /proc/sys/net/ipv6/conf/{intf}/disable_ipv6') == '' # If necessary, restore TC config overwritten by OVS if not self.batch: for intf in self.intfList(): @@ -316,8 +314,7 @@ def _get_sid_prefix(ports_served): id_chars = ''.join(sorted(string.ascii_letters + string.digits)) # pytype: disable=module-attr id_a = int(ports_served / len(id_chars)) id_b = ports_served - (id_a * len(id_chars)) - return '%s%s' % ( - id_chars[id_a], id_chars[id_b]) + return f'{id_chars[id_a]}{id_chars[id_b]}' def _add_tagged_host(self, sid_prefix, tagged_vids, host_n): """Add a single tagged test host.""" @@ -338,7 +335,7 @@ def _add_extended_host(self, sid_prefix, host_n, e_cls, tmpdir): def _add_faucet_switch(self, sid_prefix, dpid, hw_dpid, ovs_type): """Add a FAUCET switch.""" switch_cls = FaucetSwitch - switch_name = 's%s' % sid_prefix + switch_name = f's{sid_prefix}' self.switch_dpids[switch_name] = dpid self.dpid_names[dpid] = switch_name if hw_dpid and hw_dpid == dpid: @@ -460,7 +457,7 @@ class BaseFAUCET(Controller): def __init__(self, name, tmpdir, controller_intf=None, controller_ipv6=False, cargs='', **kwargs): - name = '%s-%u' % (name, os.getpid()) + name = f'{name}-{os.getpid()}' self.tmpdir = tmpdir self.controller_intf = controller_intf self.controller_ipv6 = controller_ipv6 @@ -475,13 +472,13 @@ def _add_cargs(self, cargs, name): socket_type = socket.AF_INET6 self.controller_ip = netifaces.ifaddresses( # pylint: disable=c-extension-no-member self.controller_intf)[socket_type][0]['addr'] - ofp_listen_host_arg = '--ryu-ofp-listen-host=%s' % self.controller_ip + ofp_listen_host_arg = f'--ryu-ofp-listen-host={self.controller_ip}' self.pid_file = os.path.join(self.tmpdir, name + '.pid') - pid_file_arg = '--ryu-pid-file=%s' % 
self.pid_file + pid_file_arg = f'--ryu-pid-file={self.pid_file}' ryu_conf_file = os.path.join(self.tmpdir, 'ryu.conf') with open(ryu_conf_file, 'w', encoding='utf-8') as ryu_conf: ryu_conf.write(self.RYU_CONF) - ryu_conf_arg = '--ryu-config-file=%s' % ryu_conf_file + ryu_conf_arg = f'--ryu-config-file={ryu_conf_file}' return ' '.join(( self.BASE_CARGS, pid_file_arg, ryu_conf_arg, ofp_listen_host_arg, cargs)) @@ -500,16 +497,15 @@ def _start_tcpdump(self): '-U', '-q', '-W 1', # max files 1 - '-G %u' % (self.MAX_CTL_TIME - 1), - '-c %u' % (self.MAX_OF_PKTS), - '-i %s' % self.controller_intf, - '-w %s' % self.ofcap, - 'tcp and port %u' % self.port, + f'-G {self.MAX_CTL_TIME - 1}', + f'-c {self.MAX_OF_PKTS}', + f'-i {self.controller_intf}', + f'-w {self.ofcap}', + f'tcp and port {self.port}', '>/dev/null', '2>/dev/null', )) - self.cmd('timeout %s tcpdump %s &' % ( - self.MAX_CTL_TIME, tcpdump_args)) + self.cmd(f'timeout {self.MAX_CTL_TIME} tcpdump {tcpdump_args} &') for _ in range(5): if os.path.exists(self.ofcap): return @@ -524,9 +520,9 @@ def _tls_cargs(ofctl_port, ctl_privkey, ctl_cert, ca_certs): (ctl_cert, 'ryu-ctl-cert'), (ca_certs, 'ryu-ca-certs')): if carg_val: - tls_cargs.append(('--%s=%s' % (carg_key, carg_val))) + tls_cargs.append((f'--{carg_key}={carg_val}')) if tls_cargs: - tls_cargs.append(('--ryu-ofp-ssl-listen-port=%u' % ofctl_port)) + tls_cargs.append((f'--ryu-ofp-ssl-listen-port={ofctl_port}')) return ' '.join(tls_cargs) def _command(self, env, tmpdir, name, args): @@ -534,7 +530,7 @@ def _command(self, env, tmpdir, name, args): env_vars = [] for var, val in sorted(env.items()): env_vars.append('='.join((var, val))) - script_wrapper_name = os.path.join(tmpdir, 'start-%s.sh' % name) + script_wrapper_name = os.path.join(tmpdir, f'start-{name}.sh') cprofile_args = '' if self.CPROFILE: cprofile_args = 'python3 -m cProfile -s time' @@ -549,7 +545,7 @@ def _command(self, env, tmpdir, name, args): cprofile_args, args)) script_wrapper.write(faucet_cli) - 
return '/bin/sh %s' % script_wrapper_name + return f'/bin/sh {script_wrapper_name}' def ryu_pid(self): """Return PID of ryu-manager process.""" @@ -606,12 +602,12 @@ def _stop_cap(self): """Stop tcpdump for OF port and run tshark to decode it.""" if os.path.exists(self.ofcap): self.cmd(' '.join(['fuser', '-15', '-k', self.ofcap])) - text_ofcap_log = '%s.txt' % self.ofcap + text_ofcap_log = f'{self.ofcap}.txt' with open(text_ofcap_log, 'w', encoding='utf-8') as text_ofcap: subprocess.call( ['timeout', str(self.MAX_CTL_TIME), 'tshark', '-l', '-n', '-Q', - '-d', 'tcp.port==%u,openflow' % self.port, + '-d', f'tcp.port=={self.port},openflow', '-O', 'openflow_v4', '-Y', 'openflow_v4', '-r', self.ofcap], @@ -651,8 +647,8 @@ def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env, self.ofctl_port = mininet_test_util.find_free_port( ports_sock, test_name) cargs = ' '.join(( - '--ryu-wsapi-host=%s' % mininet_test_util.LOCALHOSTV6, - '--ryu-wsapi-port=%u' % self.ofctl_port, + f'--ryu-wsapi-host={mininet_test_util.LOCALHOSTV6}', + f'--ryu-wsapi-port={self.ofctl_port}', self._tls_cargs(port, ctl_privkey, ctl_cert, ca_certs))) super().__init__( name, diff --git a/debian/control b/debian/control index 8014fcaf9d..cd0333c048 100644 --- a/debian/control +++ b/debian/control @@ -23,7 +23,7 @@ Build-Depends: debhelper (>=9), python3-pytricia, Standards-Version: 3.9.6 Homepage: https://github.com/faucetsdn/faucet -X-Python3-Version: >= 3.4 +X-Python3-Version: >= 3.6 Vcs-Git: https://github.com/faucetsdn/faucet.git Vcs-Browser: https://github.com/faucetsdn/faucet @@ -40,7 +40,7 @@ Depends: python3-influxdb (>= 2.12.0), python3-beka (>= 0.3.5), python3-beka (<< 0.3.6), python3-chewie (>= 0.0.22), python3-chewie (<< 0.0.23), python3-pytricia (>= 1.0.0), - python3:any (>= 3.5~), + python3:any (>= 3.6~), Suggests: python-faucet-doc, faucet, gauge Description: source code for faucet and gauge (Python3) Python3 library that contains the source code for the Faucet open source 
diff --git a/faucet/__main__.py b/faucet/__main__.py index 88145c691f..b1fea5f454 100755 --- a/faucet/__main__.py +++ b/faucet/__main__.py @@ -24,10 +24,10 @@ from pbr.version import VersionInfo -if sys.version_info < (3,) or sys.version_info < (3, 5): +if sys.version_info < (3,) or sys.version_info < (3, 6): raise ImportError("""You are trying to run faucet on python {py} -Faucet is not compatible with python {py}, please upgrade to python 3.5 or newer.""" +Faucet is not compatible with python {py}, please upgrade to python 3.6 or newer.""" .format(py='.'.join([str(v) for v in sys.version_info[:3]]))) RYU_OPTIONAL_ARGS = [ diff --git a/setup.cfg b/setup.cfg index bf09ab5684..147420babc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,9 +12,10 @@ classifier = Topic :: System :: Networking Natural Language :: English Programming Language :: Python - Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 + Programming Language :: Python :: 3.8 + Programming Language :: Python :: 3.9 Operating System :: Unix keywords = openflow diff --git a/setup.py b/setup.py index e9a6f96a48..83a19a7ed1 100755 --- a/setup.py +++ b/setup.py @@ -15,15 +15,15 @@ if sys.version_info < (3,): print("""You are trying to install faucet on python {py} -Faucet is not compatible with python 2, please upgrade to python 3.5 or newer.""" +Faucet is not compatible with python 2, please upgrade to python 3.6 or newer.""" .format(py='.'.join([str(v) for v in sys.version_info[:3]])), file=sys.stderr) sys.exit(1) -elif sys.version_info < (3, 5): +elif sys.version_info < (3, 6): print("""You are trying to install faucet on python {py} Faucet 1.9.0 and above are no longer compatible with older versions of python 3. 
-Please upgrade to python 3.5 or newer.""" +Please upgrade to python 3.6 or newer.""" .format(py='.'.join([str(v) for v in sys.version_info[:3]]))) sys.exit(1) @@ -90,7 +90,7 @@ def setup_faucet_log(): setup( name='faucet', setup_requires=['pbr>=1.9', 'setuptools>=17.1'], - python_requires='>=3.5', + python_requires='>=3.6', pbr=True ) From 653f9adbeab2e70253b7600980650e1224e57865 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 09:05:42 -0700 Subject: [PATCH 050/231] move to f-strings fro clib mininet test util --- clib/mininet_test_util.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/clib/mininet_test_util.py b/clib/mininet_test_util.py index a7d4467dc0..f91bcecb88 100644 --- a/clib/mininet_test_util.py +++ b/clib/mininet_test_util.py @@ -35,8 +35,7 @@ def lsof_tcp_listening_cmd(port, ipv, state, terse): terse_arg = '' if terse: terse_arg = '-t' - return 'lsof -b -P -n %s -sTCP:%s -i %u -a -i tcp:%u' % ( - terse_arg, state, ipv, port) + return f'lsof -b -P -n {terse_arg} -sTCP:{state} -i {ipv} -a -i tcp:{port}' def lsof_udp_listening_cmd(port, terse): @@ -44,8 +43,7 @@ def lsof_udp_listening_cmd(port, terse): terse_arg = '' if terse: terse_arg = '-t' - return 'lsof -b -P -n %s -i udp:%u -a' % ( - terse_arg, port) + return f'lsof -b -P -n {terse_arg} -i udp:{port} -a' def tcp_listening_cmd(port, ipv=4, state='LISTEN', terse=True): @@ -103,14 +101,14 @@ def test_server_request(ports_socket, name, command): assert name is not None sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(ports_socket) - sock.sendall(('%s,%s\n' % (command, name)).encode()) - output('%s %s\n' % (name, command)) + sock.sendall((f'{command},{name}\n').encode()) + output(f'{name} {command}\n') buf = receive_sock_line(sock) responses = [int(i) for i in buf.split('\n')] sock.close() if len(responses) == 1: responses = responses[0] - output('%s %s: %u\n' % (name, command, responses)) + output(f'{name} {command}: 
{responses}\n') return responses @@ -126,7 +124,7 @@ def find_free_port(ports_socket, name): port = test_server_request(ports_socket, request_name, GETPORT) if not tcp_listening(port): return port - error('port %u is busy, try another' % port) + error(f'port {port} is busy, try another') def find_free_udp_port(ports_socket, name): @@ -135,7 +133,7 @@ def find_free_udp_port(ports_socket, name): port = test_server_request(ports_socket, request_name, GETPORT) if not udp_listening(port): return port - error('port %u is busy, try another' % port) + error(f'port {port} is busy, try another') def return_free_ports(ports_socket, name): @@ -213,16 +211,16 @@ def queue_free_ports(min_queue_size): response_str = '' if isinstance(response, int): response = [response] - response_str = ''.join(['%u\n' % i for i in response]) + response_str = ''.join([f'{i for i in response}\n']) connection.sendall(response_str.encode()) # pylint: disable=no-member connection.close() def timeout_cmd(cmd, timeout): """Return a command line prefaced with a timeout wrappers and stdout/err unbuffered.""" - return 'timeout -sKILL %us stdbuf -o0 -e0 %s' % (timeout, cmd) + return f'timeout -sKILL {timeout}s stdbuf -o0 -e0 {cmd}' def timeout_soft_cmd(cmd, timeout): """Same as timeout_cmd buf using SIGTERM on timeout.""" - return 'timeout %us stdbuf -o0 -e0 %s' % (timeout, cmd) + return f'timeout {timeout}s stdbuf -o0 -e0 {cmd}' From 88c4d8f6742c38d039e837b286ff3fe4d4a08891 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 09:18:47 -0700 Subject: [PATCH 051/231] comprehension not forcing type with f-string --- clib/mininet_test_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_util.py b/clib/mininet_test_util.py index f91bcecb88..26831280ec 100644 --- a/clib/mininet_test_util.py +++ b/clib/mininet_test_util.py @@ -211,7 +211,7 @@ def queue_free_ports(min_queue_size): response_str = '' if isinstance(response, int): response = [response] - response_str = 
''.join([f'{i for i in response}\n']) + response_str = ''.join(['%u\n' % i for i in response]) connection.sendall(response_str.encode()) # pylint: disable=no-member connection.close() From 66f4e36f67719b01b523f9f63e2cfa49e2873c83 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 09:46:48 -0700 Subject: [PATCH 052/231] move to f-strings for clib mininet test base topo --- clib/mininet_test_base_topo.py | 72 ++++++++++++++++------------------ 1 file changed, 34 insertions(+), 38 deletions(-) diff --git a/clib/mininet_test_base_topo.py b/clib/mininet_test_base_topo.py index ea594ad03c..8ecf23f913 100644 --- a/clib/mininet_test_base_topo.py +++ b/clib/mininet_test_base_topo.py @@ -67,23 +67,22 @@ def _dp_ports(self): def get_gauge_watcher_config(self): """Return gauge watcher config""" - return """ + return f""" port_stats: - dps: ['%s'] + dps: ['{self.topo.switches_by_id[0]}'] type: 'port_stats' interval: 5 db: 'stats_file' port_state: - dps: ['%s'] + dps: ['{self.topo.switches_by_id[0]}'] type: 'port_state' interval: 5 db: 'state_file' flow_table: - dps: ['%s'] + dps: ['{self.topo.switches_by_id[0]}'] type: 'flow_table' interval: 5 - db: 'flow_dir' -""" % (self.topo.switches_by_id[0], self.topo.switches_by_id[0], self.topo.switches_by_id[0]) + db: 'flow_dir'""" def first_switch(self): """Return the first switch""" @@ -91,7 +90,7 @@ def first_switch(self): def port_labels(self, port_no): """Return regex for port label""" - port_name = 'b%u' % port_no + port_name = f'b{port_no}' return {'port': port_name, 'port_description': r'.+'} @staticmethod @@ -101,18 +100,18 @@ def acls(): def faucet_vip(self, i): """Faucet VLAN VIP""" - return '10.%u.0.254/%u' % (i + 1, self.NETPREFIX) + return f'10.{i + 1}.0.254/{self.NETPREFIX}' @staticmethod def faucet_mac(i): """Faucet VLAN MAC""" - return '00:00:00:00:00:%u%u' % (i + 1, i + 1) + return f'00:00:00:00:00:{i + 1}{i + 1}' def host_ip_address(self, host_index, vlan_index): """Create a string of the host IP address""" 
if isinstance(vlan_index, (list, tuple)): vlan_index = vlan_index[0] - return '10.%u.0.%u/%u' % (vlan_index + 1, host_index + 1, self.NETPREFIX) + return f'10.{vlan_index + 1}.0.{host_index + 1}/{self.NETPREFIX}' def host_ping(self, src_host, dst_ip, intf=None): """Default method to ping from one host to an IP address""" @@ -256,24 +255,24 @@ def setup_lacp_bonds(self): for i in self.host_port_maps[host_id]] bond_members = [ pair[0].name for switch in lacp_switches for pair in host.connectionsTo(switch)] - bond_name = 'bond%u' % (bond_index) + bond_name = f'bond{bond_index}' self.host_information[host_id]['bond'] = bond_name for bond_member in bond_members: # Deconfigure bond members self.quiet_commands(host, ( - 'ip link set %s down' % bond_member, - 'ip address flush dev %s' % bond_member)) + f'ip link set {bond_member} down', + f'ip address flush dev {bond_member}')) # Configure bond interface self.quiet_commands(host, ( - ('ip link add %s address 0e:00:00:00:00:99 ' + (f'ip link add {bond_name} address 0e:00:00:00:00:99 ' 'type bond mode 802.3ad lacp_rate fast miimon 100 ' - 'xmit_hash_policy layer2+3') % (bond_name), - 'ip add add %s/%s dev %s' % (orig_ip, self.NETPREFIX, bond_name), - 'ip link set %s up' % bond_name)) + 'xmit_hash_policy layer2+3'), + f'ip add add {orig_ip}/{self.NETPREFIX} dev {bond_name}', + f'ip link set {bond_name} up')) # Add bond members for bond_member in bond_members: self.quiet_commands(host, ( - 'ip link set dev %s master %s' % (bond_member, bond_name),)) + f'ip link set dev {bond_member} master {bond_name}',)) bond_index += 1 # Return the ports to UP for dp_i, ports in self.host_port_maps[host_id].items(): @@ -322,11 +321,11 @@ def verify_stack_hosts(self, verify_bridge_local_rule=True, retries=3): """Verify hosts with stack LLDP messages""" lldp_cap_files = [] for host in self.hosts_name_ordered(): - lldp_cap_file = os.path.join(self.tmpdir, '%s-lldp.cap' % host) + lldp_cap_file = os.path.join(self.tmpdir, f'{host}-lldp.cap') 
lldp_cap_files.append(lldp_cap_file) host.cmd(timeout_cmd( - 'tcpdump -U -n -c 1 -i %s -w %s ether proto 0x88CC and not ether src %s &' % ( - host.defaultIntf(), host.MAC(), lldp_cap_file), 60)) + f'tcpdump -U -n -c 1 -i {host.defaultIntf()} -w {lldp_cap_file}' + f' ether proto 0x88CC and not ether src {host.MAC()} &', 60)) # should not flood LLDP from hosts self.verify_lldp_blocked(self.hosts_name_ordered()) # hosts should see no LLDP probes @@ -385,7 +384,7 @@ def verify_stack_up(self, prop=1.0, timeout=25): prop_up = links_up / links if prop_up >= prop: return - self.fail('not enough links up: %f / %f' % (links_up, links)) + self.fail(f'not enough links up: {links_up} / {links}') def verify_stack_down(self): """Verify all stack ports are down""" @@ -439,14 +438,14 @@ def verify_no_arp_storm(self, ping_host, tcpdump_host): if self.topo.isSwitch(dst_node): switch_to_switch_links += 1 num_arp_expected = switch_to_switch_links * 2 - tcpdump_filter = 'arp and ether src %s' % ping_host.MAC() + tcpdump_filter = f'arp and ether src {ping_host.MAC()}' tcpdump_txt = self.tcpdump_helper( tcpdump_host, tcpdump_filter, [ - lambda: ping_host.cmd('arp -d %s' % tcpdump_host.IP()), - lambda: ping_host.cmd('ping -c1 %s' % tcpdump_host.IP())], + lambda: ping_host.cmd(f'arp -d {tcpdump_host.IP()}'), + lambda: ping_host.cmd(f'ping -c1 {tcpdump_host.IP()}')], packets=(num_arp_expected + 1)) num_arp_received = len(re.findall( - 'who-has %s tell %s' % (tcpdump_host.IP(), ping_host.IP()), tcpdump_txt)) + f'who-has {tcpdump_host.IP()} tell {ping_host.IP()}', tcpdump_txt)) self.assertTrue(num_arp_received) self.assertLessEqual(num_arp_received, num_arp_expected) @@ -477,15 +476,15 @@ def verify_tunnel_established(self, src_host, dst_host, other_host, packets=3, d tcpdump_text = self.tcpdump_helper( dst_host, 'icmp[icmptype] == 8', [ # need to set static ARP as only ICMP is tunnelled.
- lambda: src_host.cmd('arp -s %s %s' % (other_host.IP(), other_host.MAC())), - lambda: src_host.cmd('ping -c%u -t1 %s' % (packets, other_host.IP())) + lambda: src_host.cmd(f'arp -s {other_host.IP()} {other_host.MAC()}'), + lambda: src_host.cmd(f'ping -c{packets} -t1 {other_host.IP()}') ], packets=1, timeout=(packets + 1), ) self.wait_nonzero_packet_count_flow( icmp_match, table_id=self._PORT_ACL_TABLE, ofa_match=False, dpid=dpid) self.assertTrue(re.search( - '%s: ICMP echo request' % other_host.IP(), tcpdump_text + f'{other_host.IP()}: ICMP echo request', tcpdump_text ), 'Tunnel was not established') def verify_one_broadcast(self, from_host, to_hosts): @@ -497,8 +496,7 @@ def verify_one_broadcast(self, from_host, to_hosts): received_broadcasts.append(to_host) received_names = {host.name: host for host in received_broadcasts} self.assertEqual(len(received_broadcasts), 1, - 'Received not exactly one broadcast from %s: %s' % - (from_host.name, received_names)) + f'Received not exactly one broadcast from {from_host.name}: {received_names}') def map_int_ext_hosts(self): """ @@ -577,7 +575,7 @@ def validate_with_externals_down_fails(self, dp_name): self.validate_with_externals_down(dp_name) except AssertionError: asserted = True - self.assertTrue(asserted, 'Did not fail as expected for %s' % dp_name) + self.assertTrue(asserted, f'Did not fail as expected for {dp_name}' def verify_intervlan_routing(self): """Verify intervlan routing but for LAG host use bond interface""" @@ -610,15 +608,13 @@ def is_routed_vlans(self, vlan_a, vlan_b): def bcast_dst_blocked_helper(self, port, first_host, second_host, success_re, retries): """Helper for checking broadcast destination has been blocked""" - tcpdump_filter = 'udp and ether src %s and ether dst %s' % ( - first_host.MAC(), "ff:ff:ff:ff:ff:ff") + tcpdump_filter = f'udp and ether src {first_host.MAC()} and ether dst ff:ff:ff:ff:ff:ff' target_addr = str(self.faucet_vips[0].network.broadcast_address) for _ in range(retries): 
tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ partial(first_host.cmd, ( - 'date | socat - udp-datagram:%s:%d,broadcast' % ( - target_addr, port)))], + f'date | socat - udp-datagram:{target_addr}:{port},broadcast'))], packets=1) if re.search(success_re, tcpdump_txt): return True @@ -697,7 +693,7 @@ def require_linux_bond_up(self, host_id): host = self.host_information[host_id]['host'] bond_name = self.host_information[host_id]['bond'] for _ in range(self.LACP_TIMEOUT * 2): - result = host.cmd('cat /proc/net/bonding/%s|sed "s/[ \t]*$//g"' % bond_name) + result = host.cmd(f'cat /proc/net/bonding/{bond_name}|sed "s/[ \t]*$//g"') result = '\n'.join([line.rstrip() for line in result.splitlines()]) with open(os.path.join(self.tmpdir, 'bonding-state.txt'), 'w', encoding='utf-8') as state_file: state_file.write(result) @@ -715,7 +711,7 @@ def require_linux_bond_up(self, host_id): synced_state_txt.strip() self.assertFalse( re.search(synced_state_txt, result), - msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % (result, synced_state_txt)) + msg=f'LACP did not synchronize: {result}\n\nexpected:\n\n{synced_state_txt}') def verify_lag_connectivity(self, host_id): """Verify LAG connectivity""" From 4ab549a9d3e6540687096ecaaa15d2437dca326d Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 10:00:49 -0700 Subject: [PATCH 053/231] missing ) --- clib/mininet_test_base_topo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_base_topo.py b/clib/mininet_test_base_topo.py index 8ecf23f913..a84692ebc1 100644 --- a/clib/mininet_test_base_topo.py +++ b/clib/mininet_test_base_topo.py @@ -575,7 +575,7 @@ def validate_with_externals_down_fails(self, dp_name): self.validate_with_externals_down(dp_name) except AssertionError: asserted = True - self.assertTrue(asserted, f'Did not fail as expected for {dp_name}' + self.assertTrue(asserted, f'Did not fail as expected for {dp_name}') def verify_intervlan_routing(self): 
"""Verify intervlan routing but for LAG host use bond interface""" From 2f875ae3c85c8d9f4fd8cc6b5e7a7517ee73ceae Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 10:10:55 -0700 Subject: [PATCH 054/231] fix lint in clib tcpdump helper --- clib/tcpdump_helper.py | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/clib/tcpdump_helper.py b/clib/tcpdump_helper.py index 7dea4ce81a..92f20fde18 100644 --- a/clib/tcpdump_helper.py +++ b/clib/tcpdump_helper.py @@ -32,10 +32,10 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, tcpdump_flags = vflags tcpdump_flags += ' -Z root' - tcpdump_flags += ' -c %u' % packets if packets else '' - tcpdump_flags += ' -w %s' % pcap_out if pcap_out else '' - tcpdump_cmd = 'tcpdump -i %s %s --immediate-mode -e -n -U %s' % ( - self.intf_name, tcpdump_flags, tcpdump_filter) + tcpdump_flags += f' -c {packets if packets else ""}' + tcpdump_flags += f' -w {pcap_out if pcap_out else ""}' + tcpdump_cmd = f'tcpdump -i {self.intf_name} {tcpdump_flags}' + f' --immediate-mode -e -n -U {tcpdump_filter}' pipe_cmd = tcpdump_cmd if timeout: pipe_cmd = mininet_test_util.timeout_soft_cmd(tcpdump_cmd, timeout) @@ -50,8 +50,7 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, shell=False) if self.stream(): - debug('tcpdump_helper stream fd %s %s' % ( - self.stream().fileno(), self.intf_name)) + debug(f'tcpdump_helper stream fd {self.stream().fileno()} {self.intf_name)}' self.readbuf = '' self.set_blocking(blocking) @@ -81,7 +80,7 @@ def execute(self): line = self.next_line() if not line: break - debug('tcpdump_helper fd %d line "%s"' % (self.stream().fileno(), line)) + debug(f'tcpdump_helper fd {self.stream().fileno()} line "{line}"') tcpdump_txt += line.strip() return tcpdump_txt @@ -91,7 +90,7 @@ def terminate(self): return -1 try: - debug('tcpdump_helper terminate fd %s' % self.stream().fileno()) + debug(f'tcpdump_helper terminate fd {self.stream().fileno()}') 
self.pipe.terminate() result = self.pipe.wait() if result == 124: @@ -101,8 +100,7 @@ def terminate(self): self.pipe = None return result except EnvironmentError as err: - error('Error closing tcpdump_helper fd %d: %s' % ( - self.pipe.stdout.fileno(), err)) + error(f'Error closing tcpdump_helper fd {self.pipe.stdout.fileno()}: {err}') return -2 def readline(self): @@ -131,13 +129,13 @@ def next_line(self): try: line = self.readline() except OSError as err: - if err.errno == errno.EWOULDBLOCK or err.errno == errno.EAGAIN: + if err.errno in (errno.EWOULDBLOCK, errno.EAGAIN): return '' raise - assert line or self.started, 'tcpdump did not start: %s' % self.last_line.strip() + assert line or self.started, f'tcpdump did not start: {self.last_line.strip()}' if self.started: return line - if re.search('listening on %s' % self.intf_name, line): + if re.search(f'listening on {self.intf_name}', line): self.started = True # When we see tcpdump start, then call provided functions. if self.funcs is not None: From 777475da776cfa7fe6853e8efc696f7f10a1b4c6 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 10:16:07 -0700 Subject: [PATCH 055/231] fix multiline --- clib/tcpdump_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/clib/tcpdump_helper.py b/clib/tcpdump_helper.py index 92f20fde18..42263356a1 100644 --- a/clib/tcpdump_helper.py +++ b/clib/tcpdump_helper.py @@ -34,8 +34,8 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, tcpdump_flags += ' -Z root' tcpdump_flags += f' -c {packets if packets else ""}' tcpdump_flags += f' -w {pcap_out if pcap_out else ""}' - tcpdump_cmd = f'tcpdump -i {self.intf_name} {tcpdump_flags}' - f' --immediate-mode -e -n -U {tcpdump_filter}' + tcpdump_cmd = f'tcpdump -i {self.intf_name} {tcpdump_flags}' \ + f' --immediate-mode -e -n -U {tcpdump_filter}' pipe_cmd = tcpdump_cmd if timeout: pipe_cmd = mininet_test_util.timeout_soft_cmd(tcpdump_cmd, timeout) From 
39c9e52195341f2b22192957eb03e44efb07258b Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 10:18:47 -0700 Subject: [PATCH 056/231] missing ) --- clib/tcpdump_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/tcpdump_helper.py b/clib/tcpdump_helper.py index 42263356a1..4f615cc6da 100644 --- a/clib/tcpdump_helper.py +++ b/clib/tcpdump_helper.py @@ -50,7 +50,7 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, shell=False) if self.stream(): - debug(f'tcpdump_helper stream fd {self.stream().fileno()} {self.intf_name)}' + debug(f'tcpdump_helper stream fd {self.stream().fileno()} {self.intf_name)}') self.readbuf = '' self.set_blocking(blocking) From d965632a8b0a3688590ddc9034638c3e455b6f89 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 10:23:04 -0700 Subject: [PATCH 057/231] extra ) --- clib/tcpdump_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/tcpdump_helper.py b/clib/tcpdump_helper.py index 4f615cc6da..c5d7886ae8 100644 --- a/clib/tcpdump_helper.py +++ b/clib/tcpdump_helper.py @@ -50,7 +50,7 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, shell=False) if self.stream(): - debug(f'tcpdump_helper stream fd {self.stream().fileno()} {self.intf_name)}') + debug(f'tcpdump_helper stream fd {self.stream().fileno()} {self.intf_name}') self.readbuf = '' self.set_blocking(blocking) From ae77410a555528bf2b153052d676398ff460dd45 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:07:26 -0700 Subject: [PATCH 058/231] move to f-strings for clib mininet test base --- clib/mininet_test_base.py | 475 +++++++++++++++++--------------------- clib/tcpdump_helper.py | 3 +- 2 files changed, 214 insertions(+), 264 deletions(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index a62ddd9f44..8c83c828a2 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -209,7 +209,7 @@ def _set_var(self, controller, var, value): def 
_set_vars(self): """Set controller additional variables""" for c_index in range(self.NUM_FAUCET_CONTROLLERS): - self._set_var('faucet-%s' % c_index, 'FAUCET_PROMETHEUS_PORT', + self._set_var(f'faucet-{c_index}', 'FAUCET_PROMETHEUS_PORT', str(self.faucet_prom_ports[c_index])) def _set_var_path(self, controller, var, path): @@ -223,25 +223,25 @@ def _set_static_vars(self): self.event_sock_dir = tempfile.mkdtemp() self.event_socks = [] for c_index in range(self.NUM_FAUCET_CONTROLLERS): - event_sock = os.path.join(self.event_sock_dir, 'event-%s.sock' % c_index) + event_sock = os.path.join(self.event_sock_dir, f'event-{c_index}.sock') self.event_socks.append(event_sock) - self._set_var('faucet-%s' % c_index, 'FAUCET_LOG_LEVEL', str(self.LOG_LEVEL)) - self._set_var('faucet-%s' % c_index, 'FAUCET_CONFIG_STAT_RELOAD', self.STAT_RELOAD) - self._set_var('faucet-%s' % c_index, 'FAUCET_EVENT_SOCK', event_sock) - self._set_var('faucet-%s' % c_index, 'FAUCET_EVENT_SOCK_HEARTBEAT', + self._set_var(f'faucet-{c_index}', 'FAUCET_LOG_LEVEL', str(self.LOG_LEVEL)) + self._set_var(f'faucet-{c_index}', 'FAUCET_CONFIG_STAT_RELOAD', self.STAT_RELOAD) + self._set_var(f'faucet-{c_index}', 'FAUCET_EVENT_SOCK', event_sock) + self._set_var(f'faucet-{c_index}', 'FAUCET_EVENT_SOCK_HEARTBEAT', self.EVENT_SOCK_HEARTBEAT) - self._set_var('faucet-%s' % c_index, 'FAUCET_PROMETHEUS_ADDR', + self._set_var(f'faucet-{c_index}', 'FAUCET_PROMETHEUS_ADDR', mininet_test_util.LOCALHOSTV6) - self._set_var_path('faucet-%s' % c_index, 'FAUCET_CONFIG', 'faucet.yaml') - self._set_var_path('faucet-%s' % c_index, 'FAUCET_LOG', 'faucet-%s.log' % c_index) - self._set_var_path('faucet-%s' % c_index, 'FAUCET_EXCEPTION_LOG', - 'faucet-%s-exception.log' % c_index) + self._set_var_path(f'faucet-{c_index}', 'FAUCET_CONFIG', 'faucet.yaml') + self._set_var_path(f'faucet-{c_index}', 'FAUCET_LOG', f'faucet-{c_index}.log') + self._set_var_path(f'faucet-{c_index}', 'FAUCET_EXCEPTION_LOG', + f'faucet-{c_index}-exception.log') for 
c_index in range(self.NUM_GAUGE_CONTROLLERS): - self._set_var_path('gauge-%s' % c_index, 'GAUGE_CONFIG', 'gauge.yaml') - self._set_var_path('gauge-%s' % c_index, 'GAUGE_LOG', 'gauge-%s.log' % c_index) - self._set_var_path('gauge-%s' % c_index, 'GAUGE_EXCEPTION_LOG', - 'gauge-%s-exception.log' % c_index) + self._set_var_path(f'gauge-{c_index}', 'GAUGE_CONFIG', 'gauge.yaml') + self._set_var_path(f'gauge-{c_index}', 'GAUGE_LOG', f'gauge-{c_index}.log') + self._set_var_path(f'gauge-{c_index}', 'GAUGE_EXCEPTION_LOG', + f'gauge-{c_index}-exception.log') self.faucet_config_path = self.env['faucet-0']['FAUCET_CONFIG'] self.gauge_config_path = self.env['gauge-0']['GAUGE_CONFIG'] self.debug_log_path = os.path.join( @@ -286,7 +286,7 @@ def _enable_event_log(self, timeout=None): # as an alternative we might possibly use something like # `with popen(cmd...) as proc`to clean up on exceptions controller.cmd(mininet_test_util.timeout_cmd( - 'nc -U %s > %s &' % (sock, self.event_log), timeout)) + f'nc -U {sock} > {self.event_log} &', timeout)) # pylint: disable=inconsistent-return-statements def _wait_until_matching_event(self, match_func, timeout=30): @@ -337,8 +337,8 @@ def _annotate_interfaces_conf(yaml_conf): number = intf_conf.get('number', port_no) if isinstance(number, int): port_no = number - assert isinstance(number, int), '%u %s' % (intf_key, orig_intf_conf) - intf_name = 'b%u' % port_no + assert isinstance(number, int), f'{intf_key} {orig_intf_conf}' + intf_name = f'b{port_no}' intf_conf.update({'name': intf_name, 'description': intf_name}) remap_interfaces_yaml[intf_key] = intf_conf yaml_conf_remap['dps'][dp_key]['interfaces'] = remap_interfaces_yaml @@ -358,7 +358,7 @@ def _write_yaml_conf(yaml_path, yaml_conf): conf_file_tmp_str = conf_file_tmp.read() assert new_conf_str == conf_file_tmp_str if os.path.exists(yaml_path): - shutil.copyfile(yaml_path, '%s.%f' % (yaml_path, time.time())) + shutil.copyfile(yaml_path, f'{yaml_path}.{time.time()}') 
os.rename(conf_file_tmp_name, yaml_path) def _init_faucet_config(self): @@ -398,8 +398,8 @@ def _wait_load(self, load_retries=10): load = os.getloadavg()[0] if load < self.max_test_load: return - output('load average too high %f, waiting' % load) - self.fail('load average %f consistently too high' % load) + output(f'load average too high {load}, waiting') + self.fail(f'load average {load} consistently too high') def _allocate_config_ports(self): for port_name in self.config_ports: @@ -409,7 +409,7 @@ def _allocate_config_ports(self): port = mininet_test_util.find_free_port( self.ports_sock, self._test_name()) self.config_ports[port_name] = port - output('allocating port %u for %s' % (port, port_name)) + output(f'allocating port {port} for {port_name}') def _allocate_faucet_ports(self): for c_index in range(self.NUM_FAUCET_CONTROLLERS): @@ -461,7 +461,7 @@ def setUp(self): @staticmethod def hostns(host): - return '%s' % host.name + return f'{host.name}' def dump_switch_flows(self, switch): """Dump switch information to tmpdir""" @@ -469,13 +469,12 @@ def dump_switch_flows(self, switch): 'dump-flows', 'dump-groups', 'dump-meters', 'dump-group-stats', 'dump-ports', 'dump-ports-desc', 'meter-stats'): - switch_dump_name = os.path.join(self.tmpdir, '%s-%s.log' % (switch.name, dump_cmd)) + switch_dump_name = os.path.join(self.tmpdir, f'{switch.name}-{dump_cmd}.log') # TODO: occasionally fails with socket error. 
- switch.cmd('%s %s %s > %s' % (self.OFCTL, dump_cmd, switch.name, switch_dump_name), - success=None) + switch.cmd(f'{self.OFCTL} {dump_cmd} {switch.name} > {switch_dump_name}', success=None) for other_cmd in ('show', 'list controller', 'list manager'): - other_dump_name = os.path.join(self.tmpdir, '%s.log' % other_cmd.replace(' ', '')) - switch.cmd('%s %s > %s' % (self.VSCTL, other_cmd, other_dump_name)) + other_dump_name = os.path.join(self.tmpdir, f'{other_cmd.replace(" ", "")}.log') + switch.cmd(f'{self.VSCTL} {other_cmd} > {other_dump_name}') # pylint: disable=arguments-differ def tearDown(self, ignore_oferrors=False): @@ -484,15 +483,15 @@ def tearDown(self, ignore_oferrors=False): if self.NETNS: for host in self.hosts_name_ordered()[:1]: if self.get_host_netns(host): - self.quiet_commands(host, ['ip netns del %s' % self.hostns(host)]) + self.quiet_commands(host, [f'ip netns del {self.hostns(host)}']) first_switch = self.first_switch() if first_switch: - self.first_switch().cmd('ip link > %s' % os.path.join(self.tmpdir, 'ip-links.log')) + self.first_switch().cmd(f'ip link > {os.path.join(self.tmpdir, "ip-links.log")}') switch_names = [] for switch in self.net.switches: switch_names.append(switch.name) self.dump_switch_flows(switch) - switch.cmd('%s del-br %s' % (self.VSCTL, switch.name)) + switch.cmd(f'{self.VSCTL} del-br {switch.name}') self._stop_net() self.net = None if self.event_sock_dir and os.path.exists(self.event_sock_dir): @@ -530,7 +529,7 @@ def tearDown(self, ignore_oferrors=False): # Verify version is logged. self.assertTrue( self.matching_lines_from_file(r'^.+version\s+(\S+)$', logfile), - msg='no version logged in %s' % logfile) + msg=f'no version logged in {logfile}' # Verify no OFErrors. 
oferrors += '\n\n'.join(self.matching_lines_from_file(r'^.+(OFError.+)$', logfile)) if not ignore_oferrors: @@ -542,17 +541,17 @@ def _block_non_faucet_packets(self): def _cmd(cmd): with subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc: stdout, stderr = proc.communicate() - self.assertFalse(stdout, msg='%s: %s' % (stdout, cmd)) - self.assertFalse(stderr, msg='%s: %s' % (stderr, cmd)) + self.assertFalse(stdout, msg=f'{stdout}: {cmd}') + self.assertFalse(stderr, msg=f'{stdout}: {cmd}') _cmd('ebtables --f OUTPUT') for phys_port in self.switch_map.values(): phys_mac = self.get_mac_of_intf(phys_port) for cmd in ( - 'ip link set dev %s up' % phys_port, - 'ip -4 addr flush dev %s' % phys_port, - 'ip -6 addr flush dev %s' % phys_port, - 'ebtables -A OUTPUT -s %s -o %s -j DROP' % (phys_mac, phys_port)): + f'ip link set dev {phys_port} up', + f'ip -4 addr flush dev {phys_port}', + f'ip -6 addr flush dev {phys_port}', + f'ebtables -A OUTPUT -s {phys_mac} -o {phys_port} -j DROP'): _cmd(cmd) def _attach_physical_switch(self): @@ -585,7 +584,7 @@ def _attach_physical_switch(self): # and blocked traffic to/from its meaningless MAC hw_mac = self.get_mac_of_intf(hw_name) self.assertFalse(hw_mac in hw_macs, - 'duplicate hardware MAC %s' % hw_mac) + f'duplicate hardware MAC {hw_mac}') hw_macs.add(hw_mac) # Create mininet Intf and attach it to the switch hw_intf = HWIntf(hw_name, node=switch) @@ -595,11 +594,11 @@ def _attach_physical_switch(self): src, dst = hw_port, ovs_port for flow in ( # Drop anything to or from the meaningless hw_mac - 'eth_src=%s,priority=2,actions=drop' % hw_mac, - 'eth_dst=%s,priority=2,actions=drop' % hw_mac, + f'eth_src={hw_mac},priority=2,actions=drop', + f'eth_dst={hw_mac},priority=2,actions=drop', # Forward traffic bidirectionally src <-> dst - 'in_port=%u,priority=1,actions=output:%u' % (src, dst), - 'in_port=%u,priority=1,actions=output:%u' % (dst, src)): + f'in_port={src},priority=1,actions=output:{dst}', + 
f'in_port={dst},priority=1,actions=output:{src}'): switch.cmd(self.OFCTL, 'add-flow', switch, flow) def create_port_map(self, dpid): @@ -653,13 +652,12 @@ def _start_check(self): for port_name, port in self.config_ports.items(): if port is not None and not port_name.startswith('gauge'): if not self._get_controller().listen_port(port): - return 'faucet not listening on %u (%s)' % ( - port, port_name) + return f'faucet not listening on {port} ({port_name})' return self._start_gauge_check() def _create_faucet_controller(self, index, intf, ipv6): port = self.faucet_of_ports[index] - name = 'faucet-%s' % index + name = f'faucet-{index}' faucet_controller = self.CONTROLLER_CLASS( name=name, tmpdir=self.tmpdir, controller_intf=intf, @@ -677,7 +675,7 @@ def _create_faucet_controller(self, index, intf, ipv6): def _create_gauge_controller(self, index, intf, ipv6): port = self.gauge_of_ports[index] - name = 'gauge-%s' % index + name = f'gauge-{index}' gauge_controller = mininet_test_topo.Gauge( name=name, tmpdir=self.tmpdir, env=self.env[name], @@ -756,7 +754,7 @@ def _start_faucet(self, controller_intf, controller_ipv6): # Existing controllers will be reused on the next cycle self._stop_net() last_error_txt += '\n\n' + self._dump_controller_logs() - error('%s: %s' % (self._test_name(), last_error_txt)) + error(f'{self._test_name()}: {last_error_txt}') time.sleep(mininet_test_util.MIN_PORT_AGE) if last_error_txt is not None: @@ -771,15 +769,14 @@ def _start_faucet(self, controller_intf, controller_ipv6): for host in self.hosts_name_ordered()[:1]: hostns = self.hostns(host) if self.get_host_netns(host): - self.quiet_commands(host, ['ip netns del %s' % hostns]) - self.quiet_commands(host, ['ip netns add %s' % hostns]) + self.quiet_commands(host, [f'ip netns del {hostns}']) + self.quiet_commands(host, [f'ip netns add {hostns}']) self.post_start_net() def _ofctl_rest_url(self, req): """Return control URL for Ryu ofctl module.""" - return 'http://[%s]:%u/%s' % ( - 
mininet_test_util.LOCALHOSTV6, self._get_controller().ofctl_port, req) + return f'http://[{mininet_test_util.LOCALHOSTV6}]:{self._get_controller().ofctl_port}/{req}' @staticmethod def _ofctl(req, params=None): @@ -837,8 +834,8 @@ def _portmod(self, int_dpid, port_no, config, mask): @staticmethod def _signal_proc_on_port(host, port, signal): - tcp_pattern = '%s/tcp' % port - fuser_out = host.cmd('fuser %s -k -%u' % (tcp_pattern, signal)) + tcp_pattern = f'{port}/tcp' + fuser_out = host.cmd(f'fuser {tcp_pattern} -k -{signal}') return re.search(r'%s:\s+\d+' % tcp_pattern, fuser_out) def _get_ofchannel_logs(self): @@ -874,7 +871,7 @@ def _controllers_healthy(self): for c_index in range(self.NUM_FAUCET_CONTROLLERS): event_sock = self.event_socks[c_index] if event_sock and not os.path.exists(event_sock): - error('event socket %s not created\n' % event_sock) + error(f'event socket {event_sock} not created\n') return False return True @@ -917,8 +914,7 @@ def verify_no_exception(self, exception_log_name): self.assertEqual( '', exception_contents, - msg='%s log contains %s' % ( - exception_log_name, exception_contents)) + msg=f'{exception_log_name} log contains {exception_contents}') @staticmethod def tcpdump_helper(*args, **kwargs): @@ -926,44 +922,39 @@ def tcpdump_helper(*args, **kwargs): @staticmethod def scapy_template(packet, iface, count=1): - return ('python3 -c \"from scapy.all import * ; sendp(%s, iface=\'%s\', count=%u)"' % ( - packet, iface, count)) + return (f'python3 -c \"from scapy.all import * ; sendp({packet}, iface=\'{iface}\', count={count})"') def scapy_base_udp(self, mac, iface, src_ip, dst_ip, dport, sport, count=1, dst=None): if dst is None: dst = 'ff:ff:ff:ff:ff:ff' return self.scapy_template( - ('Ether(dst=\'%s\', src=\'%s\', type=%u) / ' - 'IP(src=\'%s\', dst=\'%s\') / UDP(dport=%s,sport=%s) ' % ( - dst, mac, IPV4_ETH, src_ip, dst_ip, dport, sport)), + (f'Ether(dst=\'{dst}\', src=\'{mac}\', type={IPV4_ETH}) / ' + f'IP(src=\'{src_ip}\', 
dst=\'{dst_ip}\') / UDP(dport={dport},sport={sport}) '), iface, count) def scapy_dhcp(self, mac, iface, count=1, dst=None): if dst is None: dst = 'ff:ff:ff:ff:ff:ff' return self.scapy_template( - ('Ether(dst=\'%s\', src=\'%s\', type=%u) / ' + (f'Ether(dst=\'{dst}\', src=\'{mac}\', type={IPV4_ETH}) / ' 'IP(src=\'0.0.0.0\', dst=\'255.255.255.255\') / UDP(dport=67,sport=68) / ' - 'BOOTP(op=1) / DHCP(options=[(\'message-type\', \'discover\'), (\'end\')])') % ( - dst, mac, IPV4_ETH), + 'BOOTP(op=1) / DHCP(options=[(\'message-type\', \'discover\'), (\'end\')])'), iface, count) def scapy_icmp(self, mac, iface, src_ip, dst_ip, count=1, dst=None): if dst is None: dst = 'ff:ff:ff:ff:ff:ff' return self.scapy_template( - ('Ether(dst=\'%s\', src=\'%s\', type=%u) / ' - 'IP(src=\'%s\', dst=\'%s\') / ICMP()') % ( - dst, mac, IPV4_ETH, src_ip, dst_ip), + (f'Ether(dst=\'{dst}\', src=\'{mac}\', type={IPV4_ETH}) / ' + f'IP(src=\'{src_ip}\', dst=\'{dst_ip}\') / ICMP()'), iface, count) def scapy_dscp(self, src_mac, dst_mac, dscp_value, iface, count=1): # creates a packet with L2-L4 headers using scapy return self.scapy_template( - ('Ether(dst=\'%s\', src=\'%s\', type=%u) / ' - 'IP(src=\'0.0.0.0\', dst=\'255.255.255.255\', tos=%s) / UDP(dport=67,sport=68) / ' - 'BOOTP(op=1)') % ( - dst_mac, src_mac, IPV4_ETH, dscp_value), + (f'Ether(dst=\'{dst_mac}\', src=\'{src_mac}\', type={IPV4_ETH}) / ' + f'IP(src=\'0.0.0.0\', dst=\'255.255.255.255\', tos={dscp_value}) / UDP(dport=67,sport=68) / ' + 'BOOTP(op=1)'), iface, count) def scapy_bcast(self, host, count=1): @@ -993,51 +984,46 @@ def get_config_header(self, config_global, debug_log, dpid, hardware): int(dpid), hardware, random.randint(1, 2**64 - 1)) def get_gauge_watcher_config(self): - return """ + return f""" port_stats: - dps: ['%s'] + dps: ['{self.DP_NAME}'] type: 'port_stats' interval: 5 db: 'stats_file' port_state: - dps: ['%s'] + dps: ['{self.DP_NAME}'] type: 'port_state' interval: 5 db: 'state_file' flow_table: - dps: ['%s'] + dps: 
['{self.DP_NAME}'] type: 'flow_table' interval: 5 db: 'flow_dir' -""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME) +""" def get_gauge_config(self, faucet_config_file, monitor_stats_file, monitor_state_file, monitor_flow_table_dir): """Build Gauge config.""" - return """ + return f""" faucet_configs: - - %s + - {faucet_config_file} watchers: - %s + {self.get_gauage_watcher_config()} dbs: stats_file: type: 'text' - file: %s + file: {monitor_stats_file} state_file: type: 'text' - file: %s + file: {monitor_state_file} flow_dir: type: 'text' - path: %s -%s -""" % (faucet_config_file, - self.get_gauge_watcher_config(), - monitor_stats_file, - monitor_state_file, - monitor_flow_table_dir, - self.GAUGE_CONFIG_DBS) + path: {monitor_flow_table_dir} +{self.GAUGE_CONFIG_DBS} +""" @staticmethod def get_exabgp_conf(peer, peer_config=''): @@ -1055,7 +1041,7 @@ def get_exabgp_conf(peer, peer_config=''): def get_all_groups_desc_from_dpid(self, dpid, timeout=2): int_dpid = mininet_test_util.str_int_dpid(dpid) return self._ofctl_get( - int_dpid, 'stats/groupdesc/%s' % int_dpid, timeout) + int_dpid, f'stats/groupdesc/{int_dpid}', timeout) def get_all_flows_from_dpid(self, dpid, table_id, timeout=10, match=None): """Return all flows from DPID.""" @@ -1065,7 +1051,7 @@ def get_all_flows_from_dpid(self, dpid, table_id, timeout=10, match=None): if match is not None: params['match'] = match return self._ofctl_post( - int_dpid, 'stats/flow/%s' % int_dpid, timeout, params=params) + int_dpid, f'stats/flow/{int_dpid}', timeout, params=params) @staticmethod def _port_stat(port_stats, port): @@ -1079,24 +1065,24 @@ def get_port_stats_from_dpid(self, dpid, port, timeout=2): """Return port stats for a port.""" int_dpid = mininet_test_util.str_int_dpid(dpid) port_stats = self._ofctl_get( - int_dpid, 'stats/port/%s/%s' % (int_dpid, port), timeout) + int_dpid, f'stats/port/{int_dpid}/{port}', timeout) return self._port_stat(port_stats, port) def get_port_desc_from_dpid(self, dpid, port, timeout=2): 
"""Return port desc for a port.""" int_dpid = mininet_test_util.str_int_dpid(dpid) port_stats = self._ofctl_get( - int_dpid, 'stats/portdesc/%s/%s' % (int_dpid, port), timeout) + int_dpid, f'stats/portdesc/{int_dpid}/{port}', timeout) return self._port_stat(port_stats, port) def get_all_meters_from_dpid(self, dpid): """Return all meters from DPID""" int_dpid = mininet_test_util.str_int_dpid(dpid) return self._ofctl_get( - int_dpid, 'stats/meterconfig/%s' % int_dpid, timeout=10) + int_dpid, f'stats/meterconfig/{int_dpid}', timeout=10) def wait_matching_in_group_table(self, action, group_id, timeout=10): - groupdump = os.path.join(self.tmpdir, 'groupdump-%s.txt' % self.dpid) + groupdump = os.path.join(self.tmpdir, f'groupdump-{self.dpid}.txt') for _ in range(timeout): group_dump = self.get_all_groups_desc_from_dpid(self.dpid, 1) with open(groupdump, 'w', encoding='utf-8') as groupdump_file: @@ -1111,7 +1097,7 @@ def wait_matching_in_group_table(self, action, group_id, timeout=10): # TODO: Should this have meter_confs as well or can we just match meter_ids def get_matching_meters_on_dpid(self, dpid): - meterdump = os.path.join(self.tmpdir, 'meterdump-%s.log' % dpid) + meterdump = os.path.join(self.tmpdir, f'meterdump-{dpid}.log') meter_dump = self.get_all_meters_from_dpid(dpid) with open(meterdump, 'w', encoding='utf-8') as meterdump_file: meterdump_file.write(str(meter_dump)) @@ -1136,7 +1122,7 @@ def to_old_match(match): del match[new_match] return match - flowdump = os.path.join(self.tmpdir, 'flowdump-%s.log' % dpid) + flowdump = os.path.join(self.tmpdir, f'flowdump-{dpid}.log') match = to_old_match(match) match_set = None exact_mask_match_set = None @@ -1265,7 +1251,7 @@ def wait_until_matching_flow(self, match, table_id, timeout=10, dpid, match, table_id, timeout=timeout, actions=actions, hard_timeout=hard_timeout, cookie=cookie, ofa_match=ofa_match), - msg=('match: %s table_id: %u actions: %s' % (match, table_id, actions))) + msg=(f'match: {match} table_id: 
{table_id} actions: {actions}')) def wait_until_no_matching_flow(self, match, table_id, timeout=10, actions=None, hard_timeout=0, cookie=None, @@ -1291,7 +1277,7 @@ def mac_learned(self, mac, timeout=10, in_port=None, hard_timeout=1): for eth_field, table_id in ( ('dl_src', self._ETH_SRC_TABLE), ('dl_dst', self._ETH_DST_TABLE)): - match = {eth_field: '%s' % mac} + match = {eth_field: f'{mac}'} match_hard_timeout = 0 if table_id == self._ETH_SRC_TABLE: if in_port is not None: @@ -1311,19 +1297,17 @@ def scrape_port_counters(self, ports, port_vars): val = self.scrape_prometheus_var( port_var, labels=port_labels, controller=self.gauge_controller.name, dpid=True, retries=3) - self.assertIsNotNone(val, '%s missing for port %s' % (port_var, port)) + self.assertIsNotNone(val, f'{port_var} missing for port {port}') port_counters[port][port_var] = val # Require port to be up and reporting non-zero speed. speed = self.scrape_prometheus_var( 'of_port_curr_speed', labels=port_labels, controller=self.gauge_controller.name, retries=3) - self.assertTrue(speed and speed > 0, msg='%s %s: %s' % ( - 'of_port_curr_speed', port_labels, speed)) + self.assertTrue(speed and speed > 0, msg=f'of_port_curr_speed {port_labels}: {speed}') state = self.scrape_prometheus_var( 'of_port_state', labels=port_labels, controller=self.gauge_controller.name, retries=3) - self.assertFalse(state & ofp.OFPPS_LINK_DOWN, msg='%s %s: %s' % ( - 'of_port_state', port_labels, state)) + self.assertFalse(state & ofp.OFPPS_LINK_DOWN, msg=f'of_port_state {port_labels}: {state}') return port_counters def wait_ports_updating(self, ports, port_vars, stimulate_counters_func=None): @@ -1353,7 +1337,7 @@ def wait_ports_updating(self, ports, port_vars, stimulate_counters_func=None): end_time = time.time() - error('counter latency up to %u sec\n' % (end_time - start_time)) + error(f'counter latency up to {end_time - start_time} sec\n') return not ports_not_updated @staticmethod @@ -1389,7 +1373,7 @@ def host_learned(self, 
host, timeout=10, in_port=None, hard_timeout=1): @staticmethod def get_host_intf_mac(host, intf): - return host.cmd('cat /sys/class/net/%s/address' % intf).strip() + return host.cmd(f'cat /sys/class/net/{intf}/address').strip() def get_host_netns(self, host): hostns = self.hostns(host) @@ -1437,7 +1421,7 @@ def require_host_learned(self, host, retries=8, in_port=None, hard_timeout=1): if self.host_learned(host, timeout=1, in_port=in_port, hard_timeout=hard_timeout): return learn_result = self.stimulate_host_learn(host) - self.fail('Could not learn host %s (%s): %s' % (host, host.MAC(), learn_result)) + self.fail(f'Could not learn host {host} ({host.MAC()}): {learn_result}') def get_prom_port(self, controller=None): if controller is None: @@ -1451,11 +1435,9 @@ def get_prom_addr(self, controller=None): def _prometheus_url(self, controller): if 'faucet' in controller: - return 'http://[%s]:%u' % ( - self.get_prom_addr(), self.get_prom_port()) + return f'http://[{self.get_prom_addr()}]:{self.get_prom_port()}' if 'gauge' in controller: - return 'http://[%s]:%u' % ( - self.get_prom_addr(), self.config_ports['gauge_prom_port']) + return f'http://[{self.get_prom_addr()}]:{self.config_ports['gauge_prom_port']}' raise NotImplementedError def scrape_prometheus(self, controller=None, timeout=15, var=None, verify_consistent=False): @@ -1483,7 +1465,7 @@ def scrape_prometheus(self, controller=None, timeout=15, var=None, verify_consis prom_raw = requests.get(url, {}, timeout=timeout).text except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout): return [] - log_filename = os.path.join(self.tmpdir, '%s-prometheus.log' % controller_name) + log_filename = os.path.join(self.tmpdir, f'{controller_name}-prometheus.log') with open(log_filename, 'w', encoding='utf-8') as prom_log: prom_log.write(prom_raw) prom_lines = [ @@ -1522,15 +1504,14 @@ def verify_prom_var(self, all_prom_lines): self.assertEqual(var_a, var_b) val_a = int(float(match_a.group(2))) val_b = 
int(float(match_b.group(2))) - self.assertEqual(val_a, val_b, msg='%s %s inconsistent' % - (prom_line_a, prom_line_b)) + self.assertEqual(val_a, val_b, msg=f'{prom_line_a} {prom_line_b} inconsistent') def parse_prom_var(self, prom_line): """Parse prometheus variable, return tuple of variable name, variable value""" prom_line_match = self._PROM_LINE_RE.match(prom_line) self.assertIsNotNone( prom_line_match, - msg='Invalid prometheus line %s' % prom_line) + msg=f'Invalid prometheus line {prom_line}') prom_var = prom_line_match.group(1) prom_val = int(float(prom_line_match.group(2))) return (prom_var, prom_val) @@ -1589,7 +1570,7 @@ def scrape_prometheus_var(self, var, labels=None, any_labels=False, default=None if labels: label_values = [] for label, value in sorted(labels.items()): - label_values.append('%s="%s"' % (label, value)) + label_values.append(f'{label}="{value}"') label_values_re = r'\{%s\}' % r'\S+'.join(label_values) var_re = re.compile(r'^%s%s$' % (var, label_values_re)) for i in range(retries): @@ -1628,7 +1609,7 @@ def gauge_smoke_test(self): found_watcher_files = set() missing_watcher_files = watcher_files - found_watcher_files self.assertEqual( - missing_watcher_files, set(), msg='Gauge missing logs: %s' % missing_watcher_files) + missing_watcher_files, set(), msg=f'Gauge missing logs: {missing_watcher_files}') self.hup_controller(self.gauge_controller.name) self.verify_no_exception(self.env[self.faucet_controllers[0].name]['FAUCET_EXCEPTION_LOG']) @@ -1639,12 +1620,12 @@ def prometheus_smoke_test(self): r'faucet_config\S+name=\"flood\"', r'faucet_pbr_version\S+version='): self.assertTrue( re.search(r'%s\S+\s+[1-9]+' % nonzero_var, prom_out), - msg='expected %s to be nonzero (%s)' % (nonzero_var, prom_out)) + msg=f'expected {nonzero_var} to be nonzero ({prom_out})') for zero_var in ( 'of_errors', 'of_dp_disconnections'): self.assertTrue( re.search(r'%s\S+\s+0' % zero_var, prom_out), - msg='expected %s to be present and zero (%s)' % (zero_var, 
prom_out)) + msg=f'expected {zero_var} to be present and zero ({prom_out})') def get_configure_count(self, retries=5, controller=None): """Return the number of times FAUCET has processed a reload request.""" @@ -1694,11 +1675,10 @@ def _update_conf(conf_path, yaml_conf): self.assertFalse( cold_start, msg='host cache is not maintained with cold start') self.assertTrue( - new_mac_table, msg='no host cache for VLAN %u' % host_cache) + new_mac_table, msg=f'no host cache for VLAN {host_cache}') self.assertEqual( old_mac_table, new_mac_table, - msg='host cache for VLAN %u not same over reload (old %s, new %s)' % ( - host_cache, old_mac_table, new_mac_table)) + msg=f'host cache for VLAN {host_cache} not same over reload (old {old_mac_table}, new {new_mac_table})') else: verify_faucet_reconf_func() return @@ -1790,8 +1770,7 @@ def _verify_xcast(self, received_expected, packets, tcpdump_filter, scapy_cmd, h host_b, tcpdump_filter, [partial(host_a.cmd, scapy_cmd)], packets=1, timeout=2) - msg = '%s (%s) -> %s (%s): %s' % ( - host_a, host_a.MAC(), host_b, host_b.MAC(), tcpdump_txt) + msg = f'{host_a} ({host_a.MAC()}) -> {host_b} ({host_b.MAC()}): {tcpdump_txt}' received_no_packets = self.tcpdump_rx_packets(tcpdump_txt, packets=0) received_packets = received_packets or not received_no_packets if received_packets: @@ -1812,7 +1791,7 @@ def verify_broadcast(self, hosts=None, broadcast_expected=True, packets=3): host_a, host_b = hosts tcpdump_filter = ' and '.join(( 'ether dst host ff:ff:ff:ff:ff:ff', - 'ether src host %s' % host_a.MAC(), + f'ether src host {host_a.MAC()}', 'udp')) scapy_cmd = self.scapy_bcast(host_a, count=packets) return self._verify_xcast(broadcast_expected, packets, tcpdump_filter, @@ -1824,36 +1803,35 @@ def verify_unicast(self, hosts, unicast_expected=True, packets=3): if hosts is not None: host_a, host_b = hosts tcpdump_filter = ' and '.join(( - 'ether dst %s' % host_b.MAC(), - 'ether src %s' % host_a.MAC(), + f'ether dst {host_b.MAC()}', + f'ether src 
{host_a.MAC()}', 'udp')) scapy_cmd = self.scapy_template( - ('Ether(src=\'%s\', dst=\'%s\', type=%u) / ' - 'IP(src=\'%s\', dst=\'%s\') / UDP(dport=67,sport=68)') % ( - host_a.MAC(), host_b.MAC(), IPV4_ETH, - host_a.IP(), host_b.IP()), host_a.defaultIntf(), count=packets) + (f'Ether(src=\'{host_a.MAC()}\', dst=\'{host_b.MAC()}\', type={IPV4_ETH}) / ' + f'IP(src=\'{host_a.IP()}\', dst=\'{host_b.IP()}\') / UDP(dport=67,sport=68)'), + host_a.defaultIntf(), count=packets) return self._verify_xcast(unicast_expected, packets, tcpdump_filter, scapy_cmd, host_a, host_b) def verify_empty_caps(self, cap_files): cap_file_cmds = [ - 'tcpdump -n -v -A -r %s 2> /dev/null' % cap_file for cap_file in cap_files] + f'tcpdump -n -v -A -r {cap_file} 2> /dev/null' for cap_file in cap_files] self.quiet_commands(self.net.controllers[0], cap_file_cmds) def verify_no_bcast_to_self(self, timeout=3): bcast_cap_files = [] tcpdump_timeout = timeout * len(self.hosts_name_ordered()) * 2 for host in self.hosts_name_ordered(): - tcpdump_filter = '-Q in ether src %s' % host.MAC() - bcast_cap_file = os.path.join(self.tmpdir, '%s-bcast.cap' % host) + tcpdump_filter = f'-Q in ether src {host.MAC()}' + bcast_cap_file = os.path.join(self.tmpdir, f'{host}-bcast.cap') bcast_cap_files.append(bcast_cap_file) host.cmd(mininet_test_util.timeout_cmd( - 'tcpdump -U -n -c 1 -i %s -w %s %s &' % ( - host.defaultIntf(), bcast_cap_file, tcpdump_filter), tcpdump_timeout)) + f'tcpdump -U -n -c 1 -i {host.defaultIntf()} -w {bcast_cap_file} {tcpdump_filter} &', + tcpdump_timeout)) for host in self.hosts_name_ordered(): for bcast_cmd in ( - ('ndisc6 -w1 fe80::1 %s' % host.defaultIntf()), - ('ping -b -i0.1 -c3 %s' % self.ipv4_vip_bcast())): + (f'ndisc6 -w1 fe80::1 {host.defaultIntf()}'), + (f'ping -b -i0.1 -c3 {self.ipv4_vip_bcast()}')): host.cmd(mininet_test_util.timeout_cmd(bcast_cmd, timeout)) self.verify_empty_caps(bcast_cap_files) @@ -1865,7 +1843,7 @@ def verify_unicast_not_looped(self, packets=3): 
'IP(src=\'10.0.0.100\', dst=\'10.0.0.255\')/' 'UDP(dport=9)/' 'b\'hello\'') - tcpdump_filter = '-Q in ether src %s' % unicast_mac1 + tcpdump_filter = f'-Q in ether src {unicast_mac1}' for host in self.hosts_name_ordered(): host.cmd( self.scapy_template( @@ -1890,13 +1868,12 @@ def verify_controller_fping(self, host, faucet_vip, fping_bin = 'fping' if faucet_vip.version == 6: fping_bin = 'fping6' - fping_cli = '%s %s -b %u -c %u -i %u %s' % ( - fping_bin, self.FPING_ARGS_SHORT, size, total_packets, - packet_interval_ms, faucet_vip.ip) + fping_cli = f'{fping_bin} {self.FPING_ARGS_SHORT} -b {size}' \ + f' -c {total_packets} -i {packet_interval_ms} {faucet_vip.ip}' timeout = int(((1000.0 / packet_interval_ms) * total_packets) * 1.5) fping_out = host.cmd(mininet_test_util.timeout_cmd( fping_cli, timeout)) - error('%s: %s' % (self._test_name(), fping_out)) + error(f'{self._test_name()}: {fping_out}') self.assertTrue( re.search(r'\s+[1-9][0-9]* ICMP Echo Replies received', fping_out), msg=fping_out) @@ -1910,7 +1887,7 @@ def verify_learn_counters(self, vlan, ports, verify_neighbors=False): port_vlan_hosts_learned = 0 prom_macs_learned = 0 for port in ports: - port_no = self.port_map['port_%u' % port] + port_no = self.port_map[f'port_{port}'] labels = {'vlan': str(vlan)} labels.update(self.port_labels(port_no)) port_vlan_hosts_learned += self.scrape_prometheus_var( @@ -1957,7 +1934,7 @@ def generate_mac_intfs(test_ipas, other_hosts): mac_intf_ipv4s = [] for i in range(0, max_hosts): host = other_hosts[i % len(other_hosts)] - mac_intf = 'mac%u' % i + mac_intf = f'mac{i}' mac_ipv4 = str(test_ipas[i]) mac_intf_ipv4s.append((host, mac_intf, mac_ipv4)) return mac_intf_ipv4s @@ -1975,10 +1952,10 @@ def generate_mac_intfs(test_ipas, other_hosts): learn_hosts = min_hosts successful_learn_hosts = 0 - fping_prefix = 'fping %s -q -c 1' % self.FPING_ARGS_SHORT + fping_prefix = f'fping {self.FPING_ARGS_SHORT} -q -c 1' pps_ms = 1e3 / learn_pps while learn_hosts <= max_hosts and 
successful_learn_hosts < max_hosts: - error('will learn %u hosts\n' % learn_hosts) + error(f'will learn {learn_hosts} hosts\n') start_time = time.time() learn_host_list = mac_intf_ipv4s[successful_learn_hosts:learn_hosts] random.shuffle(learn_host_list) @@ -1987,7 +1964,7 @@ def generate_mac_intfs(test_ipas, other_hosts): fping_conf_start = time.time() self.add_macvlan(host, mac_intf, mac_ipv4, ipm=test_net.prefixlen) simplify_intf_conf(host, mac_intf) - host.cmd('%s -I%s %s' % (fping_prefix, mac_intf, str(learn_ip))) + host.cmd(f'{fping_prefix} -I{mac_intf} {str(learn_ip)}') fping_ms = (time.time() - fping_conf_start) * 1e3 if fping_ms < pps_ms: time.sleep((pps_ms - fping_ms) / 1e3) @@ -2006,7 +1983,7 @@ def verify_connectivity(learn_hosts): error('.') random_unverified_ips = list(unverified_ips) random.shuffle(random_unverified_ips) - fping_cmd = '%s %s' % (fping_prefix, ' '.join(random_unverified_ips)) + fping_cmd = f'{fping_prefix} {" ".join(random_unverified_ips)}' fping_lines = first_host.cmd(fping_cmd).splitlines() for fping_line in fping_lines: loss_match = loss_re.match(fping_line) @@ -2020,7 +1997,7 @@ def verify_connectivity(learn_hosts): else: break if unverified_ips: - error('could not verify connectivity for all hosts: %s\n' % unverified_ips) + error(f'could not verify connectivity for all hosts: {unverified_ips}\n') return False return self.wait_for_prometheus_var( @@ -2030,8 +2007,7 @@ def verify_connectivity(learn_hosts): if verify_connectivity(learn_hosts): learn_time = time.time() - start_time # dump_packet_counters() - error('verified %u hosts learned in %u sec\n' % ( - learn_hosts, learn_time)) + error(f'verified {learn_hosts} hosts learned in {learn_time} sec\n') successful_learn_hosts = learn_hosts learn_hosts = min(learn_hosts * 2, max_hosts) else: @@ -2044,11 +2020,10 @@ def verify_vlan_flood_limited(self, vlan_first_host, vlan_second_host, for first_host, second_host in ( (vlan_first_host, vlan_second_host), (vlan_second_host, 
vlan_first_host)): - tcpdump_filter = 'ether host %s or ether host %s' % ( - first_host.MAC(), second_host.MAC()) + tcpdump_filter = f'ether host {first_host.MAC()} or ether host {second_host.MAC()}' tcpdump_txt = self.tcpdump_helper( other_vlan_host, tcpdump_filter, [ - partial(first_host.cmd, 'arp -d %s' % second_host.IP()), + partial(first_host.cmd, f'arp -d {second_host.IP()}'), partial(first_host.cmd, ' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], packets=1) self.verify_no_packets(tcpdump_txt) @@ -2060,9 +2035,8 @@ def verify_ping_mirrored(self, first_host, second_host, mirror_host, both_mirror self.require_host_learned(host) self.retry_net_ping(hosts=(first_host, second_host)) tcpdump_filter = ( - '(ether src %s or ether src %s) and ' - '(icmp[icmptype] == 8 or icmp[icmptype] == 0)') % ( - first_host.MAC(), second_host.MAC()) + f'(ether src {first_host.MAC()} or ether src {second_host.MAC()}) and ' + '(icmp[icmptype] == 8 or icmp[icmptype] == 0)') first_ping_second = ' '.join((self.FPINGS_ARGS_ONE, second_host.IP())) expected_pings = 2 max_expected_pings = 2 @@ -2072,10 +2046,10 @@ def verify_ping_mirrored(self, first_host, second_host, mirror_host, both_mirror mirror_host, tcpdump_filter, [ partial(first_host.cmd, first_ping_second)], packets=(max_expected_pings + 1)) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt), + f'{second_host.IP()}: ICMP echo request', tcpdump_txt), msg=tcpdump_txt) self.assertTrue(re.search( - '%s: ICMP echo reply' % first_host.IP(), tcpdump_txt), + f'{first_host.IP()}: ICMP echo reply', tcpdump_txt), msg=tcpdump_txt) received_pings = self.match_tcpdump_rx_packets(tcpdump_txt) self.assertGreaterEqual(received_pings, expected_pings) @@ -2090,19 +2064,19 @@ def verify_bcast_ping_mirrored(self, first_host, second_host, mirror_host, self.require_host_learned(host) self.retry_net_ping(hosts=(first_host, second_host)) tcpdump_filter = ( - 'ether src %s and ether dst ff:ff:ff:ff:ff:ff and ' - 
'icmp[icmptype] == 8') % second_host.MAC() + f'ether src {second_host.MAC()} and ether dst ff:ff:ff:ff:ff:ff and ' + 'icmp[icmptype] == 8') if tagged: - tcpdump_filter = 'vlan and %s' % tcpdump_filter + tcpdump_filter = f'vlan and {tcpdump_filter}' else: - tcpdump_filter = '%s and not vlan' % tcpdump_filter - second_ping_bcast = 'ping -c3 -b %s' % self.ipv4_vip_bcast() + tcpdump_filter = f'{tcpdump_filter} and not vlan' + second_ping_bcast = f'ping -c3 -b {self.ipv4_vip_bcast()}' tcpdump_txt = self.tcpdump_helper( mirror_host, tcpdump_filter, [ partial(second_host.cmd, second_ping_bcast)], packets=1) self.assertTrue(re.search( - '%s: ICMP echo request' % self.ipv4_vip_bcast(), tcpdump_txt), + f'{self.ipv4_vip_bcast()}: ICMP echo request', tcpdump_txt), msg=tcpdump_txt) def verify_ping_mirrored_multi(self, ping_pairs, mirror_host, both_mirrored=False): @@ -2132,8 +2106,8 @@ def verify_ping_mirrored_multi(self, ping_pairs, mirror_host, both_mirrored=Fals mirror_mac = mirror_host.MAC() tcpdump_filter = ( - 'not ether src %s and ' - '(icmp[icmptype] == 8 or icmp[icmptype] == 0)') % mirror_mac + f'not ether src {mirror_mac} and ' + '(icmp[icmptype] == 8 or icmp[icmptype] == 0)') # Calculate the execpted number of pings we need # to capture to validate port mirroring @@ -2152,10 +2126,10 @@ def verify_ping_mirrored_multi(self, ping_pairs, mirror_host, both_mirrored=Fals for hosts in ping_pairs: self.assertTrue(re.search( - '%s > %s: ICMP echo request' % (hosts[0].IP(), hosts[1].IP()), tcpdump_txt), + f'{hosts[0].IP()} > {hosts[1].IP()}: ICMP echo request', tcpdump_txt), msg=tcpdump_txt) self.assertTrue(re.search( - '%s > %s: ICMP echo reply' % (hosts[1].IP(), hosts[0].IP()), tcpdump_txt), + f'{hosts[1].IP()} > {hosts[0].IP()}: ICMP echo reply', tcpdump_txt), msg=tcpdump_txt) received_pings = self.match_tcpdump_rx_packets(tcpdump_txt) @@ -2183,16 +2157,13 @@ def verify_eapol_mirrored(self, first_host, second_host, mirror_host): mirror_mac = mirror_host.MAC() tmp_eap_conf 
= os.path.join(self.tmpdir, 'eap.conf') tcpdump_filter = ( - 'not ether src %s and ether proto 0x888e' % mirror_mac) + f'not ether src {mirror_mac} and ether proto 0x888e') eap_conf_cmd = ( 'echo "eapol_version=2\nap_scan=0\nnetwork={\n' 'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n' - 'password=\\"password\\"\n}\n" > %s' % tmp_eap_conf) + f'password=\\"password\\"\n}\n" > {tmp_eap_conf}' wpa_supplicant_cmd = mininet_test_util.timeout_cmd( - 'wpa_supplicant -c%s -Dwired -i%s -d' % ( - tmp_eap_conf, - first_host.defaultIntf().name), - 3) + f'wpa_supplicant -c{tmp_eap_conf} -Dwired -i{first_host.defaultIntf().name} -d', 3) tcpdump_txt = self.tcpdump_helper( mirror_host, tcpdump_filter, [ partial(first_host.cmd, eap_conf_cmd), @@ -2206,9 +2177,9 @@ def verify_eapol_mirrored(self, first_host, second_host, mirror_host): def bogus_mac_flooded_to_port1(self): first_host, second_host, third_host = self.hosts_name_ordered()[0:3] - unicast_flood_filter = 'ether host %s' % self.BOGUS_MAC - static_bogus_arp = 'arp -s %s %s' % (first_host.IP(), self.BOGUS_MAC) - curl_first_host = 'curl -m 5 http://%s' % first_host.IP() + unicast_flood_filter = f'ether host {self.BOGUS_MAC}' + static_bogus_arp = f'arp -s {first_host.IP()} {self.BOGUS_MAC}' + curl_first_host = f'curl -m 5 http://{first_host.IP()}' tcpdump_txt = self.tcpdump_helper( first_host, unicast_flood_filter, [lambda: second_host.cmd(static_bogus_arp), @@ -2218,8 +2189,7 @@ def bogus_mac_flooded_to_port1(self): def ladvd_cmd(self, ladvd_args, repeats=1, timeout=3): ladvd_mkdir = 'mkdir -p /var/run/ladvd' - ladvd_all_args = ['%s %s' % ( - mininet_test_util.timeout_cmd(self.LADVD, timeout), ladvd_args)] * repeats + ladvd_all_args = [f'{mininet_test_util.timeout_cmd(self.LADVD, timeout)} {ladvd_args}'] * repeats ladvd_cmd = ';'.join([ladvd_mkdir] + ladvd_all_args) return ladvd_cmd @@ -2280,7 +2250,7 @@ def verify_faucet_reconf(self, timeout=20, break time.sleep(1) self.assertNotEqual( - start_configure_count, 
configure_count, 'FAUCET %s did not reconfigure' % cont_name) + start_configure_count, configure_count, f'FAUCET {cont_name} did not reconfigure') if cold_start is not None: old_count = old_counts[i] if change_expected: @@ -2294,14 +2264,14 @@ def verify_faucet_reconf(self, timeout=20, time.sleep(1) self.assertTrue( new_count > old_count, - msg='FAUCET %s %s did not increment: %u' % (cont_name, var, new_count)) + msg=f'FAUCET {cont_name} {var} did not increment: {new_count}') else: new_count = int( self.scrape_prometheus_var(var, controller=cont_name, dpid=dpid, default=0)) self.assertEqual( old_count, new_count, - msg='FAUCET %s %s incremented: %u' % (cont_name, var, new_count)) + msg=f'FAUCET {cont_name} {var} incremented: {new_count}') self.wait_for_prometheus_var('faucet_config_applied', 1, controller=cont_name, dpid=None, timeout=30) self.wait_dp_status(1, controller=cont_name) @@ -2329,7 +2299,7 @@ def wait_host_stats_updated(self, hosts_switch_ports, timeout, sync_counters_fun if self.get_host_port_stats(hosts_switch_ports) != first: return time.sleep(1) - self.fail('port stats for %s never updated' % hosts_switch_ports) + self.fail(f'port stats for {hosts_switch_ports} never updated') def of_bytes_mbps(self, start_port_stats, end_port_stats, var, seconds): return (end_port_stats[var] - start_port_stats[var]) * 8 / seconds / self.ONEMBPS @@ -2360,8 +2330,7 @@ def verify_iperf_min(self, hosts_switch_ports, min_mbps, client_ip, server_ip, iperf_to_max = 0 if max_of_mbps: iperf_to_max = iperf_mbps / max_of_mbps - msg = 'iperf: %fmbps, of: %fmbps (%f)' % ( - iperf_mbps, max_of_mbps, iperf_to_max) + msg = f'iperf: {iperf_mbps}mbps, of: {max_of_mbps}mbps ({iperf_to_max})' error(msg) if ((iperf_to_max < (1.0 - prop)) or (iperf_to_max > (1.0 + prop))): @@ -2373,7 +2342,7 @@ def verify_iperf_min(self, hosts_switch_ports, min_mbps, client_ip, server_ip, @staticmethod def port_labels(port_no): - port_name = 'b%u' % port_no + port_name = f'b{port_no}' return {'port': 
port_name, 'port_description': port_name} def set_dpid_names(self, dpid_names): @@ -2421,7 +2390,7 @@ def _get_tableid(self, name, retries, default): def quiet_commands(self, host, commands): for command in commands: result = host.cmd(command) - self.assertEqual('', result, msg='%s: %s' % (command, result)) + self.assertEqual('', result, msg=f'{command}: {result}') def _config_tableids(self): # Wait for VLAN table to appear, rapidly scrape the rest. @@ -2464,46 +2433,42 @@ def flap_all_switch_ports(self, flap_time=MIN_FLAP_TIME): @staticmethod def get_mac_of_intf(intf, host=None): """Get MAC address of a port.""" - address_file_name = '/sys/class/net/%s/address' % intf + address_file_name = f'/sys/class/net/{intf}/address' if host is None: with open(address_file_name, encoding='utf-8') as address_file: address = address_file.read() else: - address = host.cmd('cat %s' % address_file_name) + address = host.cmd(f'cat {address_file_name}') return address.strip().lower() def add_macvlan(self, host, macvlan_intf, ipa=None, ipm=24, mac=None, mode='vepa'): if mac is None: mac = '' else: - mac = 'address %s' % mac + mac = f'address {mac}' add_cmds = [ - 'ip link add %s link %s %s type macvlan mode %s' % ( - macvlan_intf, host.defaultIntf(), mac, mode), - 'ip link set dev %s up' % macvlan_intf] + f'ip link add {macvlan_intf} link {host.defaultIntf()} {mac} type macvlan mode {mode}', + f'ip link set dev {macvlan_intf} up'] if ipa: add_cmds.append( - 'ip address add %s/%s brd + dev %s' % (ipa, ipm, macvlan_intf)) + f'ip address add {ipa}/{ipm} brd + dev {macvlan_intf}') self.quiet_commands(host, add_cmds) def del_macvlan(self, host, macvlan_intf): self.quiet_commands(host, [ - host.cmd('ip link del link %s %s' % ( - host.defaultIntf(), macvlan_intf))]) + host.cmd(f'ip link del link {host.defaultIntf()} {macvlan_intf}')]) def add_host_ipv6_address(self, host, ip_v6, intf=None): """Add an IPv6 address to a Mininet host.""" if intf is None: intf = host.intf() 
self.quiet_commands(host, [ - host.cmd('ip -6 addr add %s dev %s' % (ip_v6, intf))]) + host.cmd(f'ip -6 addr add {ip_v6} dev {intf}')]) def add_host_route(self, host, ip_dst, ip_gw): """Add an IP route to a Mininet host.""" - host.cmd('ip -%u route del %s' % ( - ip_dst.version, ip_dst.network.with_prefixlen)) - add_cmd = 'ip -%u route add %s via %s' % ( - ip_dst.version, ip_dst.network.with_prefixlen, ip_gw) + host.cmd(f'ip -{ip_dst.version} route del {ip_dst.network.with_prefixlen}') + add_cmd = f'ip -{ip_dst.version} route add {ip_dst.network.with_prefixlen} via {ip_gw}' self.quiet_commands(host, (add_cmd,)) def _ip_ping(self, host, dst, retries, timeout=500, @@ -2513,8 +2478,7 @@ def _ip_ping(self, host, dst, retries, timeout=500, if intf is None: intf = host.defaultIntf() good_ping = r'xmt/rcv/%%loss = %u/%u/0%%' % (count, count) - ping_cmd = '%s %s -c%u -I%s -t%u %s' % ( - fping_bin, self.FPING_ARGS, count, intf, timeout, dst) + ping_cmd = f'{fping_bin} {self.FPING_ARGS} -c{count} -I{intf} -t{timeout} {dst}' if require_host_learned: self.require_host_learned(host) pause = timeout / 1e3 @@ -2525,8 +2489,7 @@ def _ip_ping(self, host, dst, retries, timeout=500, break time.sleep(pause) pause *= 2 - self.assertEqual(ping_result, expected_result, msg='%s %s: %s' % ( - ping_cmd, ping_result, ping_out)) + self.assertEqual(ping_result, expected_result, msg=f'{ping_cmd} {ping_result}: {ping_out}') def one_ipv4_ping(self, host, dst, retries=3, timeout=1000, intf=None, require_host_learned=True, expected_result=True): @@ -2585,7 +2548,7 @@ def retry_net_ping(self, hosts=None, required_loss=0, retries=3, timeout=2): if loss <= required_loss: return time.sleep(1) - self.fail('ping %f loss > required loss %f' % (loss, required_loss)) + self.fail(f'ping {loss} loss > required loss {required_loss}') @staticmethod def tcp_port_free(host, port, ipv=4): @@ -2602,7 +2565,7 @@ def wait_for_tcp_free(self, host, port, timeout=10, ipv=4): if listen_out is None: return time.sleep(1) - 
self.fail('%s busy on port %u (%s)' % (host, port, listen_out)) + self.fail(f'{host} busy on port {port} ({listen_out})') def wait_for_tcp_listen(self, host, port, timeout=10, ipv=4): """Wait for a host to start listening on a port.""" @@ -2611,12 +2574,12 @@ def wait_for_tcp_listen(self, host, port, timeout=10, ipv=4): if listen_out is not None: return time.sleep(1) - self.fail('%s never listened on port %u' % (host, port)) + self.fail(f'{host} never listened on port {port}') def serve_str_on_tcp_port(self, host, port, serve_str='hello', timeout=20): """Serve str on a TCP port on a host.""" host.cmd(mininet_test_util.timeout_cmd( - 'echo %s | nc -l %s %u &' % (serve_str, host.IP(), port), timeout)) + f'echo {serve_str} | nc -l {host.IP()} {timeout} &') self.wait_for_tcp_listen(host, port) def wait_nonzero_packet_count_flow(self, match, table_id, timeout=15, @@ -2632,14 +2595,13 @@ def wait_nonzero_packet_count_flow(self, match, table_id, timeout=15, return time.sleep(1) if flow: - self.fail('DPID %s flow %s matching %s table ID %s had zero packet count' % - (dpid, flow, match, table_id)) + self.fail(f'DPID {dpid} flow {flow} matching {match} table ID {table_id} had zero packet count') else: - self.fail('no flow matching %s table ID %s' % (match, table_id)) + self.fail(f'no flow matching {match} table ID {table_id}') def verify_tp_dst_blocked(self, port, first_host, second_host, table_id=0, mask=None): """Verify that a TCP port on a host is blocked from another host.""" - client_cmd = mininet_test_util.timeout_cmd('nc %s %u' % (second_host.IP(), port), 5) + client_cmd = mininet_test_util.timeout_cmd(f'nc {second_host.IP()} {port}', 5) self.serve_str_on_tcp_port(second_host, port) self.quiet_commands(first_host, (client_cmd,)) if table_id is None: @@ -2659,7 +2621,7 @@ def verify_tp_dst_notblocked(self, port, first_host, second_host, table_id=0): """Verify that a TCP port on a host is NOT blocked from another host.""" serve_str = 
''.join(random.choice(string.ascii_letters) for i in range(8)) self.serve_str_on_tcp_port(second_host, port, serve_str=serve_str) - client_str = first_host.cmd('nc -w 10 %s %u' % (second_host.IP(), port)).strip() + client_str = first_host.cmd(f'nc -w 10 {second_host.IP()} {port}').strip() self.assertEqual(serve_str, client_str) if table_id is None: return @@ -2667,15 +2629,13 @@ def verify_tp_dst_notblocked(self, port, first_host, second_host, table_id=0): {'tp_dst': int(port), 'dl_type': IPV4_ETH, 'ip_proto': 6}, table_id) def bcast_dst_blocked_helper(self, port, first_host, second_host, success_re, retries): - tcpdump_filter = 'udp and ether src %s and ether dst %s' % ( - first_host.MAC(), "ff:ff:ff:ff:ff:ff") + tcpdump_filter = f'udp and ether src {first_host.MAC()} and ether dst ff:ff:ff:ff:ff:ff' target_addr = str(self.FAUCET_VIPV4.network.broadcast_address) for _ in range(retries): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ partial(first_host.cmd, ( - 'date | socat - udp-datagram:%s:%d,broadcast' % ( - target_addr, port)))], + f'date | socat - udp-datagram:{target_addr}:{port},broadcast'))], packets=1) if re.search(success_re, tcpdump_txt): return True @@ -2702,14 +2662,14 @@ def swap_host_macs(first_host, second_host): def start_exabgp(self, exabgp_conf, timeout=30, log_prefix=''): """Start exabgp process on controller host.""" - exabgp_conf_file_name = os.path.join(self.tmpdir, '%sexabgp.conf' % log_prefix) - exabgp_log = os.path.join(self.tmpdir, '%sexabgp.log' % log_prefix) - exabgp_out = os.path.join(self.tmpdir, '%sexabgp.out' % log_prefix) + exabgp_conf_file_name = os.path.join(self.tmpdir, f'{log_prefix}exabgp.conf') + exabgp_log = os.path.join(self.tmpdir, f'{log_prefix}exabgp.log') + exabgp_out = os.path.join(self.tmpdir, f'{log_prefix}exabgp.out') exabgp_env = ' '.join(( 'exabgp.daemon.user=root', 'exabgp.log.all=true', 'exabgp.log.level=DEBUG', - 'exabgp.log.destination=%s' % exabgp_log, + 
f'exabgp.log.destination={exabgp_log}', )) bgp_port = self.config_ports['bgp_port'] exabgp_conf = exabgp_conf % {'bgp_port': bgp_port} @@ -2718,16 +2678,15 @@ def start_exabgp(self, exabgp_conf, timeout=30, log_prefix=''): controller = self._get_controller() # Ensure exabgp only attempts one connection. exabgp_cmd = mininet_test_util.timeout_cmd( - 'exabgp %s --once -d 2>&1 > %s &' % ( - exabgp_conf_file_name, exabgp_out), 300) - exabgp_cli = 'env %s %s' % (exabgp_env, exabgp_cmd) + f'exabgp {exabgp_conf_file_name} --once -d 2>&1 > {exabgp_out} &', 300) + exabgp_cli = f'env {exabgp_env} {exabgp_cmd}' controller.cmd(exabgp_cli) for _ in range(timeout): if os.path.exists(exabgp_log): break time.sleep(1) self.assertTrue( - os.path.exists(exabgp_log), msg='exabgp (%s) did not start' % exabgp_cli) + os.path.exists(exabgp_log), msg=f'exabgp ({exabgp_cli}) did not start') return (exabgp_log, exabgp_out) def wait_bgp_up(self, neighbor, vlan, exabgp_log, exabgp_err): @@ -2747,7 +2706,7 @@ def wait_bgp_up(self, neighbor, vlan, exabgp_log, exabgp_err): if os.path.exists(log_name): with open(log_name, encoding='utf-8') as log: exabgp_log_content.append(log.read()) - self.fail('exabgp did not peer with FAUCET: %s' % '\n'.join(exabgp_log_content)) + self.fail('exabgp did not peer with FAUCET: ' + '\n'.join(exabgp_log_content)) @staticmethod def matching_lines_from_file(exp, log_name): @@ -2766,7 +2725,7 @@ def wait_until_matching_lines_from_file(self, exp, log_name, timeout=30, count=1 if len(lines) >= count: return lines time.sleep(1) - self.fail('%s not found in %s (%d/%d)' % (exp, log_name, len(lines), count)) + self.fail(f'{exp} not found in {log_name} ({len(lines)}/{count})') def wait_until_no_matching_lines_from_file(self, exp, log_name, timeout=30, count=1): """Require (count) matching lines to be non-existent in file.""" @@ -2776,7 +2735,7 @@ def wait_until_no_matching_lines_from_file(self, exp, log_name, timeout=30, coun if os.path.exists(log_name): lines = 
self.matching_lines_from_file(exp, log_name) if len(lines) >= count: - return self.fail('%s found in %s (%d/%d)' % (exp, log_name, len(lines), count)) + return self.fail(f'{exp} found in {log_name} ({len(lines)}/{count})') time.sleep(1) return lines @@ -2817,18 +2776,17 @@ def start_wpasupplicant(self, host, wpasupplicant_conf, timeout=10, log_prefix=' wpa_ctrl_socket_path=''): """Start wpasupplicant process on Mininet host.""" wpasupplicant_conf_file_name = os.path.join( - self.tmpdir, '%swpasupplicant.conf' % log_prefix) + self.tmpdir, f'{log_prefix}wpasupplicant.conf') wpasupplicant_log = os.path.join( - self.tmpdir, '%swpasupplicant.log' % log_prefix) + self.tmpdir, f'{log_prefix}wpasupplicant.log') with open(wpasupplicant_conf_file_name, 'w', encoding='utf-8') as wpasupplicant_conf_file: wpasupplicant_conf_file.write(wpasupplicant_conf) wpa_ctrl_socket = '' if wpa_ctrl_socket_path: - wpa_ctrl_socket = '-C %s' % wpa_ctrl_socket_path + wpa_ctrl_socket = f'-C {wpa_ctrl_socket_path}' wpasupplicant_cmd = mininet_test_util.timeout_cmd( - 'wpa_supplicant -dd -t -c %s -i %s -D wired -f %s %s &' % ( - wpasupplicant_conf_file_name, host.defaultIntf(), wpasupplicant_log, - wpa_ctrl_socket), 300) + f'wpa_supplicant -dd -t -c {wpasupplicant_conf_file_name}' \ + f' -i {host.defaultIntf()} -D wired -f {wpasupplicant_log} {wpa_ctrl_socket} &', 300) host.cmd(wpasupplicant_cmd) for _ in range(timeout): if os.path.exists(wpasupplicant_log): @@ -2836,7 +2794,7 @@ def start_wpasupplicant(self, host, wpasupplicant_conf, timeout=10, log_prefix=' time.sleep(1) self.assertTrue( os.path.exists(wpasupplicant_log), - msg='wpasupplicant (%s) did not start' % wpasupplicant_cmd) + msg=f'wpasupplicant ({wpasupplicant_cmd}) did not start') return wpasupplicant_log def ping_all_when_learned(self, retries=3, hard_timeout=1): @@ -2852,8 +2810,7 @@ def ping_all_when_learned(self, retries=3, hard_timeout=1): self.assertEqual(0, loss) def match_table(self, prefix): - exp_prefix = '%s/%s' % ( - 
prefix.network_address, prefix.netmask) + exp_prefix = f'{prefix.network_address}/{prefix.netmask}' if prefix.version == 6: nw_dst_match = {'ipv6_dst': exp_prefix, 'dl_type': IPV6_ETH} table_id = self._IPV6_FIB_TABLE @@ -2867,7 +2824,7 @@ def wait_for_route_as_flow(self, nexthop, prefix, nonzero_packets=False): """Verify a route has been added as a flow.""" nw_dst_match, table_id = self.match_table(prefix) - nexthop_action = 'SET_FIELD: {eth_dst:%s}' % nexthop + nexthop_action = f'SET_FIELD: {{eth_dst:{nexthop}}}' if vlan_vid is not None: nw_dst_match['dl_vlan'] = str(vlan_vid) if nonzero_packets: @@ -2883,16 +2840,14 @@ def host_ipv4_alias(self, host, alias_ip, intf=None): """Add an IPv4 alias address to a host.""" if intf is None: intf = host.intf() - del_cmd = 'ip addr del %s dev %s' % ( - alias_ip.with_prefixlen, intf) - add_cmd = 'ip addr add %s dev %s label %s:1' % ( - alias_ip.with_prefixlen, intf, intf) + del_cmd = f'ip addr del {alias_ip.with_prefixlen} dev {intf}' + add_cmd = f'ip addr add {alias_ip.with_prefixlen} dev {intf} label {intf}:1' host.cmd(del_cmd) self.quiet_commands(host, (add_cmd,)) @staticmethod def _ip_neigh(host, ipa, ip_ver): - neighbors = host.cmd('ip -%u neighbor show %s' % (ip_ver, ipa)) + neighbors = host.cmd(f'ip -{ip_ver} neighbor show {ipa}') neighbors_fields = neighbors.split() if len(neighbors_fields) >= 5: return neighbors.split()[4] @@ -2903,8 +2858,7 @@ def _verify_host_learned_mac(self, host, ipa, ip_ver, mac, retries): if self._ip_neigh(host, ipa, ip_ver) == mac: return time.sleep(1) - self.fail( - 'could not verify %s resolved to %s' % (ipa, mac)) + self.fail(f'could not verify {ipa} resolved to {mac}') def verify_ipv4_host_learned_mac(self, host, ipa, mac, retries=3): self._verify_host_learned_mac(host, ipa, 4, mac, retries) @@ -2953,24 +2907,22 @@ def run_iperf(iperf_server_cmd, server_host, server_start_exp, port): for _ in range(3): port = mininet_test_util.find_free_port( self.ports_sock, self._test_name()) - 
iperf_base_cmd = 'iperf -f M -p %u' % port + iperf_base_cmd = f'iperf -f M -p {port}' if server_ip.version == 6: iperf_base_cmd += ' -V' - iperf_server_cmd = '%s -s -B %s' % (iperf_base_cmd, server_ip) + iperf_server_cmd = f'{iperf_base_cmd} -s -B {server_ip}' iperf_server_cmd = mininet_test_util.timeout_cmd( iperf_server_cmd, timeout) server_start_exp = r'Server listening on TCP port %u' % port iperf_client_cmd = mininet_test_util.timeout_cmd( - '%s -y c -c %s -B %s -t %u' % (iperf_base_cmd, server_ip, client_ip, seconds), - timeout) + f'{iperf_base_cmd} -y c -c {server_ip} -B {client_ip} -t {seconds}', timeout) iperf_mbps = run_iperf(iperf_server_cmd, server_host, server_start_exp, port) if iperf_mbps is not None and iperf_mbps > 0: return iperf_mbps time.sleep(1) if iperf_mbps == -1: - self.fail('iperf client %s did not connect to server %s' % ( - iperf_client_cmd, iperf_server_cmd)) - self.fail('iperf server %s never started' % iperf_server_cmd) + self.fail(f'iperf client {iperf_client_cmd} did not connect to server {iperf_server_cmd}') + self.fail(f'iperf server {iperf_server_cmd} never started') def verify_ipv4_routing(self, first_host, first_host_routed_ip, second_host, second_host_routed_ip): @@ -2998,7 +2950,7 @@ def verify_ipv4_routing(self, first_host, first_host_routed_ip, first_host, first_host_routed_ip.ip)): iperf_mbps = self.iperf( client_host, client_ip, server_host, server_ip, 5) - error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip)) + error(f'{self._test_name()}: {iperf_mbps} mbps to {server_ip}\n') self.assertGreater(iperf_mbps, 1) # verify packets matched routing flows self.wait_for_route_as_flow( @@ -3032,7 +2984,7 @@ def verify_ipv4_routing_mesh(self): @staticmethod def host_drop_all_ips(host): for ipv in (4, 6): - host.cmd('ip -%u addr flush dev %s' % (ipv, host.defaultIntf())) + host.cmd(f'ip -{ipv} addr flush dev {host.defaultIntf()}') def setup_ipv6_hosts_addresses(self, first_host, first_host_ip, 
first_host_routed_ip, second_host, @@ -3040,7 +2992,7 @@ def setup_ipv6_hosts_addresses(self, first_host, first_host_ip, """Configure host IPv6 addresses for testing.""" for host in first_host, second_host: for intf in ('lo', host.intf()): - host.cmd('ip -6 addr flush dev %s' % intf) + host.cmd(f'ip -6 addr flush dev {intf}') self.add_host_ipv6_address(first_host, first_host_ip) self.add_host_ipv6_address(second_host, second_host_ip) self.add_host_ipv6_address(first_host, first_host_routed_ip, intf='lo') @@ -3073,7 +3025,7 @@ def verify_ipv6_routing(self, first_host, first_host_ip, first_host, first_host_routed_ip.ip)): iperf_mbps = self.iperf( client_host, client_ip, server_host, server_ip, 5) - error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip)) + error(f'{self._test_name()}: {iperf_mbps} mbps to {server_ip}\n') self.assertGreater(iperf_mbps, 1) self.one_ipv6_ping(first_host, second_host_ip.ip) self.verify_ipv6_host_learned_mac( @@ -3122,5 +3074,4 @@ def verify_invalid_bgp_route(self, pattern): if 'FAUCET_LOG' in cont_env: lines = self.matching_lines_from_file( pattern, cont_env['FAUCET_LOG']) - self.assertGreater(len(lines), 0, msg='%s not found in %s' % - (pattern, cont_env['FAUCET_LOG'])) + self.assertGreater(len(lines), 0, msg=f'{pattern} not found in {cont_env["FAUCET_LOG"]}') diff --git a/clib/tcpdump_helper.py b/clib/tcpdump_helper.py index c5d7886ae8..46d186bfb3 100644 --- a/clib/tcpdump_helper.py +++ b/clib/tcpdump_helper.py @@ -34,8 +34,7 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, tcpdump_flags += ' -Z root' tcpdump_flags += f' -c {packets if packets else ""}' tcpdump_flags += f' -w {pcap_out if pcap_out else ""}' - tcpdump_cmd = f'tcpdump -i {self.intf_name} {tcpdump_flags}' \ - f' --immediate-mode -e -n -U {tcpdump_filter}' + tcpdump_cmd = f'tcpdump -i {self.intf_name} {tcpdump_flags} --immediate-mode -e -n -U {tcpdump_filter}' pipe_cmd = tcpdump_cmd if timeout: pipe_cmd = 
mininet_test_util.timeout_soft_cmd(tcpdump_cmd, timeout) From 305898af2c866f6e234534203403443acbda1a76 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:12:01 -0700 Subject: [PATCH 059/231] missing ) --- clib/mininet_test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index 8c83c828a2..c3ebfa304f 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -529,7 +529,7 @@ def tearDown(self, ignore_oferrors=False): # Verify version is logged. self.assertTrue( self.matching_lines_from_file(r'^.+version\s+(\S+)$', logfile), - msg=f'no version logged in {logfile}' + msg=f'no version logged in {logfile}') # Verify no OFErrors. oferrors += '\n\n'.join(self.matching_lines_from_file(r'^.+(OFError.+)$', logfile)) if not ignore_oferrors: From 56c276011e47c2f2d4c06dc68b191ee4414107fa Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:17:07 -0700 Subject: [PATCH 060/231] fix quotes inside quotes --- clib/mininet_test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index c3ebfa304f..2dd49747d9 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -1437,7 +1437,7 @@ def _prometheus_url(self, controller): if 'faucet' in controller: return f'http://[{self.get_prom_addr()}]:{self.get_prom_port()}' if 'gauge' in controller: - return f'http://[{self.get_prom_addr()}]:{self.config_ports['gauge_prom_port']}' + return f'http://[{self.get_prom_addr()}]:{self.config_ports["gauge_prom_port"]}' raise NotImplementedError def scrape_prometheus(self, controller=None, timeout=15, var=None, verify_consistent=False): From 7b65b473119488a2c0730bae029f52e3cd8d7b96 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:20:09 -0700 Subject: [PATCH 061/231] missing ) --- clib/mininet_test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/clib/mininet_test_base.py b/clib/mininet_test_base.py index 2dd49747d9..d9c72d242c 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -2161,7 +2161,7 @@ def verify_eapol_mirrored(self, first_host, second_host, mirror_host): eap_conf_cmd = ( 'echo "eapol_version=2\nap_scan=0\nnetwork={\n' 'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n' - f'password=\\"password\\"\n}\n" > {tmp_eap_conf}' + f'password=\\"password\\"\n}\n" > {tmp_eap_conf}') wpa_supplicant_cmd = mininet_test_util.timeout_cmd( f'wpa_supplicant -c{tmp_eap_conf} -Dwired -i{first_host.defaultIntf().name} -d', 3) tcpdump_txt = self.tcpdump_helper( From 6a0a6b64286032e928ccde9e7cd01b3462623fc6 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:26:37 -0700 Subject: [PATCH 062/231] fix timeout arg --- clib/mininet_test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index d9c72d242c..3c3c92872c 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -2579,7 +2579,7 @@ def wait_for_tcp_listen(self, host, port, timeout=10, ipv=4): def serve_str_on_tcp_port(self, host, port, serve_str='hello', timeout=20): """Serve str on a TCP port on a host.""" host.cmd(mininet_test_util.timeout_cmd( - f'echo {serve_str} | nc -l {host.IP()} {timeout} &') + f'echo {serve_str} | nc -l {host.IP()} {port} &', timeout)) self.wait_for_tcp_listen(host, port) def wait_nonzero_packet_count_flow(self, match, table_id, timeout=15, From 110dfbc77f624348453a1b211585f928ded5d53f Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:29:15 -0700 Subject: [PATCH 063/231] escape {} for f-string --- clib/mininet_test_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index 3c3c92872c..f6e0970754 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -2159,9 +2159,9 @@ def 
verify_eapol_mirrored(self, first_host, second_host, mirror_host): tcpdump_filter = ( f'not ether src {mirror_mac} and ether proto 0x888e') eap_conf_cmd = ( - 'echo "eapol_version=2\nap_scan=0\nnetwork={\n' - 'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n' - f'password=\\"password\\"\n}\n" > {tmp_eap_conf}') + f'echo "eapol_version=2\nap_scan=0\nnetwork={{\n' + f'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n' + f'password=\\"password\\"\n}}\n" > {tmp_eap_conf}') wpa_supplicant_cmd = mininet_test_util.timeout_cmd( f'wpa_supplicant -c{tmp_eap_conf} -Dwired -i{first_host.defaultIntf().name} -d', 3) tcpdump_txt = self.tcpdump_helper( From e9bc987484088736875edb4ab10f9736b9d54b4f Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:37:03 -0700 Subject: [PATCH 064/231] f-string expression can't include \ --- clib/mininet_test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index f6e0970754..24a1498a1b 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -2706,7 +2706,7 @@ def wait_bgp_up(self, neighbor, vlan, exabgp_log, exabgp_err): if os.path.exists(log_name): with open(log_name, encoding='utf-8') as log: exabgp_log_content.append(log.read()) - self.fail(f'exabgp did not peer with FAUCET: {"\n".join(exabgp_log_content)}') + self.fail('exabgp did not peer with FAUCET: %s' % '\n'.join(exabgp_log_content)) @staticmethod def matching_lines_from_file(exp, log_name): From ff942d35eb6932988d50d9235ca03fc21c980f8d Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:39:40 -0700 Subject: [PATCH 065/231] typo --- clib/mininet_test_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index 24a1498a1b..9f1b0c4e8b 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -1011,7 +1011,7 @@ def get_gauge_config(self, faucet_config_file, faucet_configs: - 
{faucet_config_file} watchers: - {self.get_gauage_watcher_config()} + {self.get_gauge_watcher_config()} dbs: stats_file: type: 'text' From c6bd0e2ba4dbb7e6850877a80e2ef11dcaf52866 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:46:00 -0700 Subject: [PATCH 066/231] f-string mangling args --- clib/tcpdump_helper.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/clib/tcpdump_helper.py b/clib/tcpdump_helper.py index 46d186bfb3..01c415d022 100644 --- a/clib/tcpdump_helper.py +++ b/clib/tcpdump_helper.py @@ -32,9 +32,10 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, tcpdump_flags = vflags tcpdump_flags += ' -Z root' - tcpdump_flags += f' -c {packets if packets else ""}' - tcpdump_flags += f' -w {pcap_out if pcap_out else ""}' - tcpdump_cmd = f'tcpdump -i {self.intf_name} {tcpdump_flags} --immediate-mode -e -n -U {tcpdump_filter}' + tcpdump_flags += ' -c %u' % packets if packets else '' + tcpdump_flags += ' -w %s' % pcap_out if pcap_out else '' + tcpdump_cmd = 'tcpdump -i %s %s --immediate-mode -e -n -U %s' % ( + self.intf_name, tcpdump_flags, tcpdump_filter) pipe_cmd = tcpdump_cmd if timeout: pipe_cmd = mininet_test_util.timeout_soft_cmd(tcpdump_cmd, timeout) From fad2ac6aa77656c333848deac53bc6d4329d7794 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 12:59:00 -0700 Subject: [PATCH 067/231] move to f-strings for docs conf --- docs/conf.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 694d2e3efa..1b16010f18 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -206,28 +206,28 @@ def generate_prometheus_metric_table(_): } for module in ["faucet", "gauge"]: - block_text[module] = """\ -.. list-table:: {} prometheus metrics + block_text[module] = f""" +.. 
list-table:: {module.title()} prometheus metrics :widths: 40 10 55 :header-rows: 1 * - Metric - Type - Description -""".format(module.title()) +""" # pylint: disable=protected-access for metric in metrics[module]._reg.collect(): if metric.type == "counter": - metric_name = "{}_total".format(metric.name) + metric_name = f"{metric.name}_total" else: metric_name = metric.name - block_text[module] += """\ - * - {} - - {} - - {} -""".format(metric_name, metric.type, metric.documentation) + block_text[module] += f""" + * - {metric_name} + - {metric.type} + - {metric.documentation} +""" with open(output_path[module], 'w', encoding='utf-8') as output_file: output_file.write(block_text[module]) From 36a9a454f764ae99f8777266c291651765c43200 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 13:09:28 -0700 Subject: [PATCH 068/231] move to f-strings for __main__ --- faucet/__main__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/faucet/__main__.py b/faucet/__main__.py index b1fea5f454..88622bac8e 100755 --- a/faucet/__main__.py +++ b/faucet/__main__.py @@ -89,12 +89,12 @@ def parse_args(sys_args): for ryu_arg in RYU_OPTIONAL_ARGS: if len(ryu_arg) >= 3: args.add_argument( - '--ryu-%s' % ryu_arg[0], + f'--ryu-{ryu_arg[0]}', help=ryu_arg[1], default=ryu_arg[2]) else: args.add_argument( - '--ryu-%s' % ryu_arg[0], + f'--ryu-{ryu_arg[0]}', help=ryu_arg[1]) return args.parse_args(sys_args) @@ -103,7 +103,7 @@ def parse_args(sys_args): def print_version(): """Print version number and exit.""" version = VersionInfo('faucet').semantic_version().release_string() - message = 'Faucet %s' % version + message = f'Faucet {version}' print(message) @@ -136,7 +136,7 @@ def build_ryu_args(argv): if arg == 'ryu_config_file' and not os.path.isfile(val): continue arg_name = arg.replace('ryu_', '').replace('_', '-') - ryu_args.append('--%s=%s' % (arg_name, val)) + ryu_args.append(f'--{arg_name}={val}') # Running Faucet or Gauge? 
if args.gauge or os.path.basename(prog) == 'gauge': From 818e4bb705df26ee78c0893f962d4585bf873655 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 13:20:31 -0700 Subject: [PATCH 069/231] move to f-strings for faucet acl; fix pytype issues --- clib/mininet_test_base.py | 4 ++-- faucet/acl.py | 43 +++++++++++++++++---------------------- 2 files changed, 21 insertions(+), 26 deletions(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index 9f1b0c4e8b..b74e86570a 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -1868,7 +1868,7 @@ def verify_controller_fping(self, host, faucet_vip, fping_bin = 'fping' if faucet_vip.version == 6: fping_bin = 'fping6' - fping_cli = f'{fping_pin} {self.FPING_ARGS_SHORT} -b {size}' \ + fping_cli = f'{fping_bin} {self.FPING_ARGS_SHORT} -b {size}' \ f' -c {total_packets} -i {packet_interval_ms} {faucet_vip.ip}' timeout = int(((1000.0 / packet_interval_ms) * total_packets) * 1.5) fping_out = host.cmd(mininet_test_util.timeout_cmd( @@ -2810,7 +2810,7 @@ def ping_all_when_learned(self, retries=3, hard_timeout=1): self.assertEqual(0, loss) def match_table(self, prefix): - exp_prefix = f'{prefix.network_address}/{prefix_netmask}' + exp_prefix = f'{prefix.network_address}/{prefix.netmask}' if prefix.version == 6: nw_dst_match = {'ipv6_dst': exp_prefix, 'dl_type': IPV6_ETH} table_id = self._IPV6_FIB_TABLE diff --git a/faucet/acl.py b/faucet/acl.py index 49475dde2b..3a1cbc36fe 100644 --- a/faucet/acl.py +++ b/faucet/acl.py @@ -139,7 +139,7 @@ def __init__(self, _id, dp_id, conf): conf = {} else: raise InvalidConfigError( - 'ACL conf is an invalid type %s' % _id) + f'ACL conf is an invalid type {_id}') conf['rules'] = [] for rule in rules: normalized_rule = rule @@ -148,7 +148,7 @@ def __init__(self, _id, dp_id, conf): if normalized_rule is None: normalized_rule = {k: v for k, v in rule.items() if v is not None} test_config_condition(not isinstance(normalized_rule, dict), ( - 'ACL rule is %s 
not %s (%s)' % (type(normalized_rule), dict, rules))) + f'ACL rule is {type(normalized_rule)} not {dict} ({rules})')) conf['rules'].append(normalized_rule) super().__init__(_id, dp_id, conf) @@ -158,7 +158,7 @@ def finalize(self): def check_config(self): test_config_condition( - not self.rules, 'no rules found for ACL %s' % self._id) + not self.rules, f'no rules found for ACL {self._id}') for rule in self.rules: self._check_conf_types(rule, self.rule_types) for rule_field, rule_conf in rule.items(): @@ -169,7 +169,7 @@ def check_config(self): elif rule_field == 'actions': test_config_condition( not rule_conf, - 'Missing rule actions in ACL %s' % self._id) + f'Missing rule actions in ACL {self._id}') self._check_conf_types(rule_conf, self.actions_types) for action_name, action_conf in rule_conf.items(): if action_name == 'output': @@ -281,7 +281,7 @@ def _resolve_ordered_output_ports(self, output_list, resolve_port_cb, resolve_tu # Fetch tunnel items from the tunnel output dict test_config_condition( 'dp' not in tunnel, - 'ACL (%s) tunnel DP not defined' % self._id) + f'ACL ({self._id}) tunnel DP not defined') tunnel_dp = tunnel['dp'] tunnel_port = tunnel.get('port', None) tunnel_id = tunnel.get('tunnel_id', None) @@ -292,8 +292,8 @@ def _resolve_ordered_output_ports(self, output_list, resolve_port_cb, resolve_tu tunnel_reverse = tunnel.get('reverse', False) test_config_condition( tunnel_reverse and tunnel_direction, - ('Tunnel ACL %s cannot contain values for the fields' - '`bi_directional` and `reverse` at the same time' % self._id)) + (f'Tunnel ACL {self._id} cannot contain values for the fields' + '`bi_directional` and `reverse` at the same time')) # Resolve the tunnel items dst_dp, dst_port, tunnel_id = resolve_tunnel_objects( tunnel_dp, tunnel_port, tunnel_id) @@ -315,14 +315,14 @@ def _resolve_ordered_output_ports(self, output_list, resolve_port_cb, resolve_tu port = resolve_port_cb(port_name) test_config_condition( not port, - 'ACL (%s) output port undefined 
in DP: %s' % (self._id, self.dp_id)) + f'ACL ({self._id}) output port undefined in DP: {self.dp_id}') result.append({key: port}) elif key == 'ports': resolved_ports = [ resolve_port_cb(p) for p in value] test_config_condition( None in resolved_ports, - 'ACL (%s) output port(s) not defined in DP: %s' % (self._id, self.dp_id)) + f'ACL ({self._id}) output port(s) not defined in DP: {self.dp_id}') result.append({key: resolved_ports}) elif key == 'failover': failover = value @@ -335,8 +335,7 @@ def _resolve_ordered_output_ports(self, output_list, resolve_port_cb, resolve_tu resolve_port_cb(p) for p in failover_values] test_config_condition( None in resolved_ports, - 'ACL (%s) failover port(s) not defined in DP: %s' % ( - self._id, self.dp_id)) + f'ACL ({self._id}) failover port(s) not defined in DP: {self.dp_id}') failover_dict[failover_name] = resolved_ports else: failover_dict[failover_name] = failover_values @@ -353,17 +352,17 @@ def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_obj result = {} test_config_condition( 'vlan_vid' in action_conf and 'vlan_vids' in action_conf, - 'ACL %s has both vlan_vid and vlan_vids defined' % self._id) + f'ACL {self._id} has both vlan_vid and vlan_vids defined') test_config_condition( 'port' in action_conf and 'ports' in action_conf, - 'ACL %s has both port and ports defined' % self._id) + f'ACL {self._id} has both port and ports defined') for output_action, output_action_values in action_conf.items(): if output_action == 'tunnel': tunnel = output_action_values # Fetch tunnel items from the tunnel output dict test_config_condition( 'dp' not in tunnel, - 'ACL (%s) tunnel DP not defined' % self._id) + f'ACL ({self._id}) tunnel DP not defined') tunnel_dp = tunnel['dp'] tunnel_port = tunnel.get('port', None) tunnel_id = tunnel.get('tunnel_id', None) @@ -374,8 +373,8 @@ def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_obj tunnel_reverse = tunnel.get('reverse', False) 
test_config_condition( tunnel_reverse and tunnel_direction, - ('Tunnel ACL %s cannot contain values for the fields' - '`bi_directional` and `reverse` at the same time' % self._id)) + (f'Tunnel ACL {self._id} cannot contain values for the fields' + '`bi_directional` and `reverse` at the same time') # Resolve the tunnel items dst_dp, dst_port, tunnel_id = resolve_tunnel_objects( tunnel_dp, tunnel_port, tunnel_id) @@ -397,8 +396,7 @@ def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_obj port = resolve_port_cb(port_name) test_config_condition( not port, - ('ACL (%s) output port undefined in DP: %s' - % (self._id, self.dp_id)) + (f'ACL (+self._id}) output port undefined in DP: {self.dp_id}') ) result[output_action] = port elif output_action == 'ports': @@ -406,8 +404,7 @@ def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_obj resolve_port_cb(p) for p in output_action_values] test_config_condition( None in resolved_ports, - ('ACL (%s) output port(s) not defined in DP: %s' - % (self._id, self.dp_id)) + (f'ACL ({self._id}) output port(s) not defined in DP: {self.dp_id}') ) result[output_action] = resolved_ports elif output_action == 'failover': @@ -421,8 +418,7 @@ def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_obj resolve_port_cb(p) for p in failover_values] test_config_condition( None in resolved_ports, - ('ACL (%s) failover port(s) not defined in DP: %s' - % (self._id, self.dp_id)) + (f'ACL ({self._id}) failover port(s) not defined in DP: {self.dp_id}') ) result[output_action][failover_name] = resolved_ports else: @@ -446,8 +442,7 @@ def resolve_ports(self, resolve_port_cb, resolve_tunnel_objects): resolved_port = resolve_port_cb(action_conf) test_config_condition( resolved_port is None, - ('ACL (%s) mirror port is not defined in DP: %s' - % (self._id, self.dp_id)) + (f'ACL ({self._id}) mirror port is not defined in DP: {self.dp_id}') ) resolved_actions[action_name] = resolved_port elif 
action_name == 'output': From 1279024e1a6e006285490d8ee19e808fbd9d4b91 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 13:23:46 -0700 Subject: [PATCH 070/231] missing ) --- faucet/acl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/faucet/acl.py b/faucet/acl.py index 3a1cbc36fe..f78a0b6587 100644 --- a/faucet/acl.py +++ b/faucet/acl.py @@ -375,6 +375,7 @@ def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_obj tunnel_reverse and tunnel_direction, (f'Tunnel ACL {self._id} cannot contain values for the fields' '`bi_directional` and `reverse` at the same time') + ) # Resolve the tunnel items dst_dp, dst_port, tunnel_id = resolve_tunnel_objects( tunnel_dp, tunnel_port, tunnel_id) From 88efd84ae17e93a4d5f017253f28f24b94ddd6b2 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 13:48:03 -0700 Subject: [PATCH 071/231] typo --- faucet/acl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faucet/acl.py b/faucet/acl.py index f78a0b6587..14de18556a 100644 --- a/faucet/acl.py +++ b/faucet/acl.py @@ -397,7 +397,7 @@ def _resolve_output_ports(self, action_conf, resolve_port_cb, resolve_tunnel_obj port = resolve_port_cb(port_name) test_config_condition( not port, - (f'ACL (+self._id}) output port undefined in DP: {self.dp_id}') + (f'ACL ({self._id}) output port undefined in DP: {self.dp_id}') ) result[output_action] = port elif output_action == 'ports': From bad7f775e85a52fb788d84f200319329e61430da Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 14:24:19 -0700 Subject: [PATCH 072/231] move to f-strings for config_parser --- faucet/config_parser.py | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/faucet/config_parser.py b/faucet/config_parser.py index 3f676528e0..bb7667dc6e 100644 --- a/faucet/config_parser.py +++ b/faucet/config_parser.py @@ -69,8 +69,7 @@ def _get_vlan_by_key(dp_id, vlan_key, vlans): if vlan_key == vlan.vid: return vlan 
test_config_condition(not isinstance(vlan_key, int), ( - 'Implicitly created VLAN %s must be an int (not %s)' % ( - vlan_key, type(vlan_key)))) + f'Implicitly created VLAN {vlan_key} must be an int (not {type(vlan_key)})')) # Create VLAN with VID, if not defined. return vlans.setdefault(vlan_key, VLAN(vlan_key, dp_id)) @@ -90,7 +89,7 @@ def _dp_parse_tagged_port_vlans(): port = Port(port_key, dp_id, port_conf) test_config_condition(str(port_key) not in (str(port.number), port.name), ( - 'Port key %s match port name or port number' % port_key)) + f'Port key {port_key} match port name or port number')) _dp_parse_native_port_vlan() _dp_parse_tagged_port_vlans() return port @@ -128,7 +127,7 @@ def _parse_port_ranges(port_ranges_conf, port_num_to_port_conf): for range_ in re.findall(r'(\d+-\d+)', str(port_range)): start_num, end_num = [int(num) for num in range_.split('-')] test_config_condition(start_num >= end_num, ( - 'Incorrect port range (%d - %d)' % (start_num, end_num))) + f'Incorrect port range ({start_num} - {end_num})')) port_nums.update(range(start_num, end_num + 1)) port_range = re.sub(range_, '', port_range) other_nums = [int(p) for p in re.findall(r'\d+', str(port_range))] @@ -176,17 +175,17 @@ def _parse_dp(dp_key, dp_conf, acls_conf, meters_conf, routers_conf, vlans_conf) test_config_condition(not isinstance(dp_conf, dict), 'DP config must be dict') dp = DP(dp_key, dp_conf.get('dp_id', None), dp_conf) test_config_condition(dp.name != dp_key, ( - 'DP key %s and DP name must match' % dp_key)) + f'DP key {dp_key} and DP name must match')) vlans = {} vids = set() for vlan_key, vlan_conf in vlans_conf.items(): vlan = VLAN(vlan_key, dp.dp_id, vlan_conf) test_config_condition(str(vlan_key) not in (str(vlan.vid), vlan.name), ( - 'VLAN %s key must match VLAN name or VLAN VID' % vlan_key)) + f'VLAN {vlan_key} key must match VLAN name or VLAN VID')) test_config_condition(not isinstance(vlan_key, (str, int)), ( - 'VLAN %s key must not be type %s' % (vlan_key, 
type(vlan_key)))) + f'VLAN {vlan_key} key must not be type {type(vlan_key)}')) test_config_condition(vlan.vid in vids, ( - 'VLAN VID %u multiply configured' % vlan.vid)) + f'VLAN VID {vlan.vid} multiply configured')) vlans[vlan_key] = vlan vids.add(vlan.vid) _parse_acls(dp, acls_conf) @@ -206,7 +205,7 @@ def _dp_parser_v2(dps_conf, acls_conf, meters_conf, dp_key, dp_conf, acls_conf, meters_conf, routers_conf, vlans_conf) dp_vlans.append((dp, vlans)) except InvalidConfigError as err: - raise InvalidConfigError('DP %s: %s' % (dp_key, err)) from err + raise InvalidConfigError(f'DP {dp_key}: {err}') from err # Some VLANs are created implicitly just by referencing them in tagged/native, # so we must make them available to all DPs. @@ -231,7 +230,7 @@ def _dp_parser_v2(dps_conf, acls_conf, meters_conf, dpid_refs = set() for dp in dps: test_config_condition(dp.dp_id in dpid_refs, ( - 'DPID %u is duplicated' % dp.dp_id)) + f'DPID {dp.dp_id} is duplicated')) dpid_refs.add(dp.dp_id) routers_referenced = set() @@ -239,7 +238,7 @@ def _dp_parser_v2(dps_conf, acls_conf, meters_conf, routers_referenced.update(dp.routers.keys()) for router in routers_conf: test_config_condition(router not in routers_referenced, ( - 'router %s configured but not used by any DP' % router)) + f'router {router} configured but not used by any DP')) return dps @@ -265,10 +264,10 @@ def _config_parser_v2(config_file, logname, meta_dp_state): if not config_parser_util.dp_include( config_hashes, config_contents, config_path, logname, top_confs): - raise InvalidConfigError('Error found while loading config file: %s' % config_path) + raise InvalidConfigError(f'Error found while loading config file: {config_path}') if not top_confs['dps']: - raise InvalidConfigError('DPs not configured in file: %s' % config_path) + raise InvalidConfigError(f'DPs not configured in file: {config_path}') dps = dp_preparsed_parser(top_confs, meta_dp_state) return (config_hashes, config_contents, dps, top_confs) @@ -328,7 +327,7 
@@ def _watcher_parser_v2(conf, logname, prom_client): # TODO: refactor watcher_conf as a container. for dp_name in watcher_dps: if dp_name not in dps: - logger.error('DP %s in Gauge but not configured in FAUCET', dp_name) + logger.error(f'DP {dp_name} in Gauge but not configured in FAUCET') continue dp = dps[dp_name] if 'dbs' in watcher_conf: From ecab41234c6f4eb3703bd1daa54484c645205f2d Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 15:20:42 -0700 Subject: [PATCH 073/231] move to f-strings for clib valve test lib --- clib/valve_test_lib.py | 119 ++++++++++++++++++----------------------- 1 file changed, 53 insertions(+), 66 deletions(-) diff --git a/clib/valve_test_lib.py b/clib/valve_test_lib.py index 2b036c1edf..61adb00cb4 100644 --- a/clib/valve_test_lib.py +++ b/clib/valve_test_lib.py @@ -93,7 +93,7 @@ def build_dict(pkt): elif type_ == icmpv6.ICMPV6_ECHO_REQUEST: pkt_dict['echo_request_data'] = icmpv6_pkt.data.data else: - raise NotImplementedError('Unknown packet type %s \n' % icmpv6_pkt) + raise NotImplementedError(f'Unknown packet type {icmpv6_pkt} \n') ipv4_pkt = pkt.get_protocol(ipv4.ipv4) if ipv4_pkt: pkt_dict['ipv4_src'] = ipv4_pkt.src @@ -104,7 +104,7 @@ def build_dict(pkt): if type_ == icmp.ICMP_ECHO_REQUEST: pkt_dict['echo_request_data'] = icmp_pkt.data.data else: - raise NotImplementedError('Unknown packet type %s \n' % icmp_pkt) + raise NotImplementedError(f'Unknown packet type {icmp_pkt} \n') lacp_pkt = pkt.get_protocol(slow.lacp) if lacp_pkt: pkt_dict['actor_system'] = lacp_pkt.actor_system @@ -297,10 +297,10 @@ def serialize(layers): noauth_acl: noauth_acl """ + BASE_DP1_CONFIG -CONFIG = """ +CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -410,13 +410,13 @@ def serialize(layers): vid: 0x300 v400: vid: 0x400 -""" % DP1_CONFIG +""" -STACK_CONFIG = """ +STACK_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} stack: priority: 1 interfaces: @@ -472,12 +472,12 @@ def serialize(layers): vlans: v100: vid: 0x100 - """ % 
DP1_CONFIG + """ -STACK_LOOP_CONFIG = """ +STACK_LOOP_CONFIG = f""" dps: s1: -%s +{BASE_DP1_CONFIG} interfaces: 1: description: p1 @@ -493,7 +493,7 @@ def serialize(layers): description: p3 native_vlan: v100 s2: -%s +{BASE_DP_CONFIG} faucet_dp_mac: 0e:00:00:00:01:02 dp_id: 0x2 interfaces: @@ -511,7 +511,7 @@ def serialize(layers): description: p3 native_vlan: v100 s3: -%s +{BASE_DP_CONFIG} faucet_dp_mac: 0e:00:00:00:01:03 dp_id: 0x3 stack: @@ -533,7 +533,7 @@ def serialize(layers): vlans: v100: vid: 0x100 -""" % (BASE_DP1_CONFIG, BASE_DP_CONFIG, BASE_DP_CONFIG) +""" class ValveTestBases: @@ -810,7 +810,7 @@ def update_config(self, config, table_dpid=None, reload_type='cold', self.configure_network() else: if reload_type is not None: - var = 'faucet_config_reload_%s_total' % reload_type + var = f'faucet_config_reload_{reload_type}_total' self.prom_inc( reload_func, var=var, inc_expected=reload_expected, dp_id=table_dpid) else: @@ -952,7 +952,7 @@ def prom_inc(self, func, var, labels=None, inc_expected=True, dp_id=None): before = self.get_prom(var, labels, dp_id) func() after = self.get_prom(var, labels, dp_id) - msg = '%s %s before %f after %f' % (var, labels, before, after) + msg = f'{var} {labels} before {before} after {after}' if inc_expected: self.assertEqual(before + 1, after, msg=msg) else: @@ -1055,8 +1055,7 @@ def port_expected_status(self, port_no, exp_status, dp_id=None): status = int(self.get_prom('port_status', labels=labels, dp_id=dp_id)) self.assertEqual( status, exp_status, - msg='status %u != expected %u for port %s' % ( - status, exp_status, labels)) + msg=f'status {status} != expected {exp_status} for port {labels}') def get_other_valves(self, valve): """Return other running valves""" @@ -1160,8 +1159,7 @@ def trigger_stack_ports(self, ignore_ports=None): exp_state = 4 self.assertEqual( port.dyn_stack_current_state, exp_state, - '%s stack state %s != %s' % ( - port, port.dyn_stack_current_state, exp_state)) + f'{port} stack state 
{port.dyn_stack_current_state} != {exp_state}') # Send LLDP packets to reset the stack ports that we want to be up for dp_id, valve in self.valves_manager.valves.items(): for port in valve.dp.ports.values(): @@ -1180,8 +1178,7 @@ def trigger_stack_ports(self, ignore_ports=None): exp_state = 4 self.assertEqual( port.dyn_stack_current_state, exp_state, - '%s stack state %s != %s' % ( - port, port.dyn_stack_current_state, exp_state)) + f'{port} stack state {port.dyn_stack_current_state} != {exp_state}') def flap_port(self, port_no): """Flap op status on a port.""" @@ -1218,10 +1215,10 @@ def down_stack_port(self, port): def _update_port_map(self, port, add_else_remove): this_dp = port.dp_id this_num = port.number - this_key = '%s:%s' % (this_dp, this_num) + this_key = f'{this_dp}:{this_num}' peer_dp = port.stack['dp'].dp_id peer_num = port.stack['port'].number - peer_key = '%s:%s' % (peer_dp, peer_num) + peer_key = f'{peer_dp}:{peer_num}' key_array = [this_key, peer_key] key_array.sort() key = key_array[0] @@ -1308,8 +1305,8 @@ def pkt_match(self, src, dst): return { 'eth_src': '00:00:00:01:00:%02x' % src, 'eth_dst': '00:00:00:01:00:%02x' % dst, - 'ipv4_src': '10.0.0.%d' % src, - 'ipv4_dst': '10.0.0.%d' % dst, + 'ipv4_src': f'10.0.0.{src}', + 'ipv4_dst': f'10.0.0.{dst}', 'vid': self.V100 } @@ -1411,8 +1408,7 @@ def _verify_flood_to_port(match, port, valve_vlan, port_number=None): match, in_port, valve_vlan, ofp.OFPP_IN_PORT) self.assertEqual( in_port.hairpin, hairpin_output, - msg='hairpin flooding incorrect (expected %s got %s)' % ( - in_port.hairpin, hairpin_output)) + msg=f'hairpin flooding incorrect (expected {in_port.hairping} got {hairpin_output})') for port in valve_vlan.get_ports(): output = _verify_flood_to_port(match, port, valve_vlan) @@ -1423,27 +1419,24 @@ def _verify_flood_to_port(match, port, valve_vlan, port_number=None): # Packet must be flooded to all ports on the VLAN. 
if port == in_port: self.assertEqual(port.hairpin, output, - 'unexpected hairpin flood %s %u' % ( - match, port.number)) + f'unexpected hairpin flood {match} {port.number}') else: self.assertTrue( output, msg=( - '%s with unknown eth_dst not flooded' - ' on VLAN %u to port %u\n%s' % ( - match, valve_vlan.vid, - port.number, self.network.tables[dp_id]))) + f'{match} with unknown eth_dst not flooded' + f' on VLAN {valve_vlan.vid} to port {port.number}\n{self.network.tables[dp_id]}')) # Packet must not be flooded to ports not on the VLAN. for port in remaining_ports: if port.stack: self.assertTrue( self.network.tables[dp_id].is_output(match, port=port.number), - msg=('Unknown eth_dst not flooded to stack port %s' % port)) + msg=(f'Unknown eth_dst not flooded to stack port {port}')) elif not port.mirror: self.assertFalse( self.network.tables[dp_id].is_output(match, port=port.number), - msg=('Unknown eth_dst flooded to non-VLAN/stack/mirror %s' % port)) + msg=(f'Unknown eth_dst flooded to non-VLAN/stack/mirror {port}')) def verify_pkt(self, pkt, expected_pkt): """ @@ -1457,7 +1450,7 @@ def verify_pkt(self, pkt, expected_pkt): for key in expected_pkt: self.assertTrue( key in pkt_dict, - 'key %s not in pkt %s' % (key, pkt_dict)) + f'key {key} not in pkt {pkt_dict}') if expected_pkt[key] is None: # Sometimes we may not know that correct value but # want to ensure that there exists a value so use the None @@ -1465,7 +1458,7 @@ def verify_pkt(self, pkt, expected_pkt): continue self.assertEqual( expected_pkt[key], pkt_dict[key], - 'key: %s not matching (%s != %s)' % (key, expected_pkt[key], pkt_dict[key])) + f'key: {key} not matching ({expected_pkt[key]} != {pkt_dict[key]})') def verify_route_add_del(self, dp_id, vlan_vid, ip_gw, ip_dst): """ @@ -1936,8 +1929,7 @@ def test_unknown_eth_src(self): vid = vid | ofp.OFPVID_PRESENT self.assertTrue( self.network.tables[self.DP_ID].is_output(match, ofp.OFPP_CONTROLLER, vid=vid), - msg="Packet with unknown ethernet src not sent to 
controller: " - "{0}".format(match)) + msg=f"Packet with unknown ethernet src not sent to controller: {match}") def test_unknown_eth_dst_rule(self): """Test that packets with unkown eth dst addrs get flooded correctly. @@ -1994,8 +1986,7 @@ def test_known_eth_src_rule(self): for match in matches: self.assertFalse( self.network.tables[self.DP_ID].is_output(match, port=ofp.OFPP_CONTROLLER), - msg="Packet ({0}) output to controller when eth_src address" - " is known".format(match)) + msg=f"Packet ({match}) output to controller when eth_src address is known") def test_known_eth_src_deletion(self): """Verify that when a mac changes port the old rules get deleted. @@ -2050,8 +2041,7 @@ def test_known_eth_dst_rule(self): for port in incorrect_ports: self.assertFalse( self.network.tables[self.DP_ID].is_output(match, port=port), - msg=('packet %s output to incorrect port %u when eth_dst ' - 'is known' % (match, port))) + msg=f'packet {match} output to incorrect port {port} when eth_dst is known') self.verify_expiry() def test_mac_vlan_separation(self): @@ -2120,9 +2110,8 @@ def test_port_delete_eth_dst(self): vid = 0 self.assertTrue( self.network.tables[self.DP_ID].is_output(match, port=port.number, vid=vid), - msg=('packet %s with eth dst learnt on deleted port not output ' - 'correctly on vlan %u to port %u' % ( - match, valve_vlan.vid, port.number))) + msg=(f'packet {match} with eth dst learnt on deleted port not output ' + 'correctly on vlan {valve_vlan.vid} to port {port.number}')) def test_port_down_eth_src_removal(self): """Test that when a port goes down and comes back up learnt mac @@ -2154,11 +2143,11 @@ def test_port_add_input(self): def test_dp_acl_deny(self): """Test DP acl denies forwarding""" - acl_config = """ + acl_config = f""" dps: s1: dp_acls: [drop_non_ospf_ipv4] -%s +{DP1_CONFIG} interfaces: p2: number: 2 @@ -2196,7 +2185,7 @@ def test_dp_acl_deny(self): rate: 1 } ] -""" % DP1_CONFIG +""" drop_match = { 'in_port': 2, @@ -2219,11 +2208,11 @@ def 
test_dp_acl_deny(self): def test_dp_acl_deny_ordered(self): """Test DP acl denies forwarding""" - acl_config = """ + acl_config = f""" dps: s1: dp_acls: [drop_non_ospf_ipv4] -%s +{DP1_CONFIG} interfaces: p2: number: 2 @@ -2261,7 +2250,7 @@ def test_dp_acl_deny_ordered(self): rate: 1 } ] -""" % DP1_CONFIG +""" drop_match = { 'in_port': 2, @@ -2284,10 +2273,10 @@ def test_dp_acl_deny_ordered(self): def test_port_acl_deny(self): """Test that port ACL denies forwarding.""" - acl_config = """ + acl_config = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p2: number: 2 @@ -2323,7 +2312,7 @@ def test_port_acl_deny(self): rate: 1 } ] -""" % DP1_CONFIG +""" drop_match = { 'in_port': 2, @@ -2616,20 +2605,18 @@ def base_config(self): def create_config(self): """Create the config file""" - self.CONFIG = """ + self.CONFIG = f""" vlans: vlan100: vid: 0x100 - faucet_mac: '%s' - faucet_vips: ['%s'] + faucet_mac: '{self.VLAN100_FAUCET_MAC}' + faucet_vips: ['{self.VLAN100_FAUCET_VIP_SPACE}'] vlan200: vid: 0x200 - faucet_mac: '%s' - faucet_vips: ['%s'] - %s - """ % (self.VLAN100_FAUCET_MAC, self.VLAN100_FAUCET_VIP_SPACE, - self.VLAN200_FAUCET_MAC, self.VLAN200_FAUCET_VIP_SPACE, - self.base_config()) + faucet_mac: '{self.VLAN200_FAUCET_MAC}' + faucet_vips: ['{self.VLAN200_FAUCET_VIP_SPACE}'] + {self.base_config()} + """ def setup_stack_routing(self): """Create a stacking config file.""" @@ -2640,12 +2627,12 @@ def setup_stack_routing(self): @staticmethod def create_mac(vindex, host): """Create a MAC address string""" - return '00:00:00:0%u:00:0%u' % (vindex, host) + return f'00:00:00:0{vindex}:00:0{host}' @staticmethod def create_ip(vindex, host): """Create a IP address string""" - return '10.0.%u.%u' % (vindex, host) + return f'10.0.{vindex}.{host}' @staticmethod def get_eth_type(): From f94f9da47de80f9225f0891863938005be33bc6f Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 15:44:54 -0700 Subject: [PATCH 074/231] move to f-strings for faucet dp --- faucet/dp.py | 101 
+++++++++++++++++++++++---------------------------- 1 file changed, 46 insertions(+), 55 deletions(-) diff --git a/faucet/dp.py b/faucet/dp.py index 40b62bc9ef..fabf5c332c 100644 --- a/faucet/dp.py +++ b/faucet/dp.py @@ -352,13 +352,13 @@ def check_config(self): """Check configuration of this dp""" super().check_config() test_config_condition(not isinstance(self.dp_id, int), ( - 'dp_id must be %s not %s' % (int, type(self.dp_id)))) + f'dp_id must be {int} not {type(self.dp_id)}')) test_config_condition(self.dp_id < 0 or self.dp_id > 2**64 - 1, ( - 'DP ID %s not in valid range' % self.dp_id)) + f'DP ID {self.dp_id} not in valid range')) test_config_condition(not netaddr.valid_mac(self.faucet_dp_mac), ( - 'invalid MAC address %s' % self.faucet_dp_mac)) + f'invalid MAC address {self.faucet_dp_mac}')) test_config_condition(not (self.interfaces or self.interface_ranges), ( - 'DP %s must have at least one interface' % self)) + f'DP {self} must have at least one interface')) test_config_condition(self.timeout < 15, 'timeout must be > 15') test_config_condition(self.timeout > 65535, 'timeout cannot be > than 65335') # To prevent L2 learning from timing out before L3 can refresh @@ -392,7 +392,7 @@ def _lldp_defaults(self): if 'send_interval' not in self.lldp_beacon: self.lldp_beacon['send_interval'] = self.DEFAULT_LLDP_SEND_INTERVAL test_config_condition(self.lldp_beacon['send_interval'] < 1, ( - 'DP ID %s LLDP beacon send_interval not in valid range' % self.dp_id)) + f'DP ID {self.dp_id} LLDP beacon send_interval not in valid range')) if 'max_per_interval' not in self.lldp_beacon: self.lldp_beacon['max_per_interval'] = self.DEFAULT_LLDP_MAX_PER_INTERVAL self.lldp_beacon = self._set_unknown_conf( @@ -470,7 +470,7 @@ def pipeline_str(self): (table.table_id, str(table.table_config)) for table in self.tables.values()]) return '\n'.join([ - 'table ID %u %s' % (table_id, table_config) + f'table ID {table_id} {table_config}' for table_id, table_config in table_configs]) def 
pipeline_tableids(self): @@ -481,8 +481,7 @@ def _configure_tables(self): """Configure FAUCET pipeline with tables.""" valve_cl = SUPPORTED_HARDWARE.get(self.hardware, None) test_config_condition( - not valve_cl, 'hardware %s must be in %s' % ( - self.hardware, list(SUPPORTED_HARDWARE))) + not valve_cl, f'hardware {self.hardare} must be in {list(SUPPORTED_HARDWARE)}') if valve_cl is None: return @@ -497,7 +496,7 @@ def _configure_tables(self): # Only configure IP routing tables if enabled. for vlan in self.vlans.values(): for ipv in vlan.ipvs(): - included_tables.add('ipv%u_fib' % ipv) + included_tables.add(f'ipv{ipv}_fib') included_tables.add('vip') if valve_cl.STATIC_TABLE_IDS: included_tables.add('port_acl') @@ -548,12 +547,12 @@ def _configure_tables(self): set_fields = set(table_config.set_fields) test_config_condition( not set_fields.issubset(oxm_fields), - 'set_fields not all OpenFlow OXM fields %s' % (set_fields - oxm_fields)) + f'set_fields not all OpenFlow OXM fields {set_fields - oxm_fields}') if table_config.match_types: matches = set(match for match, _ in table_config.match_types) test_config_condition( not matches.issubset(oxm_fields), - 'matches not all OpenFlow OXM fields %s' % (matches - oxm_fields)) + f'matches not all OpenFlow OXM fields {matches - oxm_fields}') scale_factor = 1.0 # Need flows for internal/external. 
@@ -887,7 +886,7 @@ def resolve_ports(port_names): def resolve_vlan(vlan_name): """Resolve VLAN by name or VID.""" test_config_condition(not isinstance(vlan_name, (str, int)), ( - 'VLAN must be type %s or %s not %s' % (str, int, type(vlan_name)))) + f'VLAN must be type {str} or {int} not {type(vlan_name)}')) if vlan_name in vlan_by_name: return vlan_by_name[vlan_name] if vlan_name in self.vlans: @@ -910,13 +909,13 @@ def resolve_stack_dps(): for port in self.stack_ports(): stack_dp = port.stack['dp'] test_config_condition(stack_dp not in dp_by_name, ( - 'stack DP %s not defined' % stack_dp)) + f'stack DP {stack_dp} not defined')) port_stack_dp[port] = dp_by_name[stack_dp] for port, dp in port_stack_dp.items(): port.stack['dp'] = dp stack_port = dp.resolve_port(port.stack['port']) test_config_condition(stack_port is None, ( - 'stack port %s not defined in DP %s' % (port.stack['port'], dp.name))) + f'stack port {port.stack["port"]} not defined in DP {dp.name}')) port.stack['port'] = stack_port def resolve_mirror_destinations(): @@ -926,7 +925,7 @@ def resolve_mirror_destinations(): if mirror_port.mirror is not None: mirrored_ports = resolve_ports(mirror_port.mirror) test_config_condition(len(mirrored_ports) != len(mirror_port.mirror), ( - 'port mirror not defined in DP %s' % self.name)) + f'port mirror not defined in DP {self.name}')) for mirrored_port in mirrored_ports: mirror_from_port[mirrored_port].append(mirror_port) @@ -950,7 +949,7 @@ def resolve_acl(acl_in, vid=None, port_num=None): matches, set_fields, meter (3-Tuple): ACL matches, set fields and meter values """ test_config_condition(acl_in not in self.acls, ( - 'missing ACL %s in DP: %s' % (acl_in, self.name))) + f'missing ACL {acl_in} in DP: {self.name}')) acl = self.acls[acl_in] tunnel_dsts_to_vlan = {} @@ -997,12 +996,12 @@ def get_tunnel_vlan(tunnel_id_name, resolved_dst): if tunnel_vlan: # VLAN exists, i.e: user specified the VLAN so check if it is reserved test_config_condition(not 
tunnel_vlan.reserved_internal_vlan, ( - 'VLAN %s is required for use by tunnel %s but is not reserved' % ( - tunnel_vlan.name, tunnel_id_name))) + f'VLAN {tunnel_vlan.name} is required for use by' \ + f' tunnel {tunnel_id_name} but is not reserved')) else: # VLAN does not exist, so the ID should be the VID the user wants test_config_condition(isinstance(tunnel_id_name, str), ( - 'Tunnel VLAN (%s) does not exist' % tunnel_id_name)) + f'Tunnel VLAN ({tunnel_id_name}) does not exist')) # Create the tunnel VLAN object tunnel_vlan = create_vlan(tunnel_id_name) tunnel_vlan.reserved_internal_vlan = True @@ -1010,8 +1009,8 @@ def get_tunnel_vlan(tunnel_id_name, resolved_dst): if existing_tunnel_vlan is not None: test_config_condition( existing_tunnel_vlan == tunnel_vlan.vid, - 'Cannot have multiple tunnel IDs (%u, %u) to same destination %s' % ( - existing_tunnel_vlan.vid, tunnel_vlan.vid, resolved_dst)) + f'Cannot have multiple tunnel IDs ({existing_tunnel_vlan.vid},' \ + f' {tunnel_vlan.vid}) to same destination {resolved_dst}') return tunnel_vlan def resolve_tunnel_objects(dst_dp_name, dst_port_name, tunnel_id_name): @@ -1029,18 +1028,16 @@ def resolve_tunnel_objects(dst_dp_name, dst_port_name, tunnel_id_name): test_config_condition(vid is not None, 'Tunnels do not support VLAN-ACLs') # Port & DP tunnel ACL test_config_condition(dst_dp_name not in dp_by_name, ( - 'Could not find referenced destination DP (%s) for tunnel ACL %s' % ( - dst_dp_name, acl_in))) + f'Could not find referenced destination DP ({dst_dp_name}) for tunnel ACL {acl_in}')) dst_dp = dp_by_name[dst_dp_name] dst_port = None if dst_port_name: dst_port = dst_dp.resolve_port(dst_port_name) test_config_condition(dst_port is None, ( - 'Could not find referenced destination port (%s) for tunnel ACL %s' % ( + f'Could not find referenced destination port ({dst_port_name}) for tunnel ACL {acl_in}')) dst_port_name, acl_in))) test_config_condition(dst_port.stack is None, ( - 'destination port %s for tunnel ACL %s 
cannot be a stack port' % ( - dst_port_name, acl_in))) + f'destination port {dst_port_name} for tunnel ACL {acl_in} cannot be a stack port')) dst_port = dst_port.number dst_dp = dst_dp.name resolved_dst = (dst_dp, dst_port) @@ -1054,7 +1051,7 @@ def resolve_tunnel_objects(dst_dp_name, dst_port_name, tunnel_id_name): acl.resolve_ports(resolve_port_cb, resolve_tunnel_objects) for meter_name in acl.get_meters(): test_config_condition(meter_name not in self.meters, ( - 'meter %s is not configured' % meter_name)) + f'meter {meter_name} is not configured')) acl_meters.add(meter_name) for port_no in acl.get_mirror_destinations(): port = self.ports[port_no] @@ -1144,7 +1141,7 @@ def resolve_routers(): vids = {vlan.vid for vlan in self.vlans.values()} test_config_condition( self.global_vlan in vids, - 'global_vlan VID %s conflicts with existing VLAN' % self.global_vlan) + f'global_vlan VID {self.global_vlan} conflicts with existing VLAN') # Check for overlapping VIP subnets or VLANs. all_router_vlans = set() @@ -1154,7 +1151,7 @@ def resolve_routers(): lone_vlan = router.vlans[0] test_config_condition( lone_vlan in all_router_vlans, - 'single VLAN %s in more than one router' % lone_vlan) + f'single VLAN {lone_vlan} in more than one router') for vlan in router.vlans: vips.update({vip for vip in vlan.faucet_vips if not vip.ip.is_link_local}) all_router_vlans.update(router.vlans) @@ -1162,25 +1159,23 @@ def resolve_routers(): for other_vip in vips - set([vip]): test_config_condition( vip.ip in other_vip.network, - 'VIPs %s and %s overlap in router %s' % ( - vip, other_vip, router_name)) + f'VIPs {vip} and {other_vip} overlap in router {router_name}') bgp_routers = self.bgp_routers() if bgp_routers: for bgp_router in bgp_routers: bgp_vlan = bgp_router.bgp_vlan() vlan_dp_ids = [str(dp.dp_id) for dp in dps if bgp_vlan.vid in dp.vlans] test_config_condition(len(vlan_dp_ids) != 1, ( - 'DPs (%s) sharing a BGP speaker VLAN (%s) is unsupported') % ( - ', '.join(vlan_dp_ids), 
bgp_vlan.vid)) + f'DPs ({", ".join(vlan_dp_ids)}) sharing a BGP speaker VLAN ({bgp_vlan.vid}) is unsupported')) test_config_condition(bgp_router.bgp_server_addresses() != ( bgp_routers[0].bgp_server_addresses()), ( 'BGP server addresses must all be the same')) router_ids = {bgp_router.bgp_routerid() for bgp_router in bgp_routers} test_config_condition( - len(router_ids) != 1, 'BGP router IDs must all be the same: %s' % router_ids) + len(router_ids) != 1, f'BGP router IDs must all be the same: {router_ids}') bgp_ports = {bgp_router.bgp_port() for bgp_router in bgp_routers} test_config_condition( - len(bgp_ports) != 1, 'BGP ports must all be the same: %s' % bgp_ports) + len(bgp_ports) != 1, f'BGP ports must all be the same: {bgp_ports}') if not self.stack_ports(): # Revert back to None if there are no stack ports @@ -1191,7 +1186,7 @@ def resolve_routers(): test_config_condition( not self.vlans and not self.non_vlan_ports(), - 'no VLANs referenced by interfaces in %s' % self.name) + f'no VLANs referenced by interfaces in {self.name}') dp_by_name = {dp.name: dp for dp in dps} vlan_by_name = {vlan.name: vlan for vlan in self.vlans.values()} loop_protect_external_ports = { @@ -1263,18 +1258,16 @@ def _get_conf_changes(logger, conf_name, subconf, new_subconf, diff=False, ignor new_conf, ignore_keys=(ignore_keys.union(['description']))): same_confs.add(conf_id) description_only_confs.add(conf_id) - logger.info('%s %s description only changed' % ( - conf_name, conf_id)) + logger.info(f'{conf_name} {conf_id} description only changed') else: changed_confs.add(conf_id) if diff: - logger.info('%s %s changed: %s' % ( - conf_name, conf_id, old_conf.conf_diff(new_conf))) + logger.info(f'{conf_name} {conf_id} changed: {old_conf.conf_diff(new_conf)}') else: - logger.info('%s %s changed' % (conf_name, conf_id)) + logger.info(f'{conf_name} {conf_id} changed') else: added_confs.add(conf_id) - logger.info('%s %s added' % (conf_name, conf_id)) + logger.info(f'{conf_name} {conf_id} 
added') for conf_id in same_confs: old_conf = subconf[conf_id] @@ -1283,13 +1276,14 @@ def _get_conf_changes(logger, conf_name, subconf, new_subconf, diff=False, ignor changes = deleted_confs or added_confs or changed_confs if changes: if deleted_confs: - logger.info('%ss deleted: %s' % (conf_name, deleted_confs)) + logger.info(f'{conf_name}s deleted: {deleted_confs}') if added_confs: - logger.info('%ss added: %s' % (conf_name, added_confs)) + logger.info(f'{conf_name}s added: {added_confs}') if changed_confs: - logger.info('%ss changed: %s' % (conf_name, changed_confs)) + logger.info(f'{conf_name}s changed: {changed_congs}') else: - logger.info('no %s changes' % conf_name) + logger.info(f'no {conf_name} changes') + return ( changes, deleted_confs, added_confs, changed_confs, same_confs, description_only_confs) @@ -1411,8 +1405,7 @@ def _add_changed_vlans(old_port, new_port): old_port = self.ports[port_no] new_port = new_dp.ports[port_no] if old_port.mirror != new_port.mirror: - logger.info('port %s mirror options changed: %s' % ( - port_no, new_port.mirror)) + logger.info(f'port {port_no} mirror options changed: {new_port.mirror}') changed_ports.add(port_no) # ACL changes new_acl_ids = new_port.acls_in @@ -1425,16 +1418,14 @@ def _add_changed_vlans(old_port, new_port): old_acl_ids = [acl._id for acl in old_acl_ids] if port_acls_changed: changed_acl_ports.add(port_no) - logger.info('port %s ACL changed (ACL %s content changed)' % ( - port_no, port_acls_changed)) + logger.info(f'port {port_no} ACL changed (ACL {port_acls_changed} content changed)') elif (old_acl_ids or new_acl_ids) and old_acl_ids != new_acl_ids: changed_acl_ports.add(port_no) - logger.info('port %s ACL changed (ACL %s to %s)' % ( - port_no, old_acl_ids, new_acl_ids)) + logger.info(f'port {port_no} ACL changed (ACL {old_acl_ids} to {new_acl_ids})') if changed_acl_ports: same_ports -= changed_acl_ports - logger.info('ports where ACL only changed: %s' % changed_acl_ports) + logger.info(f'ports where 
ACL only changed: {changed_acl_ports}') same_ports -= changed_ports changed_vlans -= deleted_vlans @@ -1445,7 +1436,7 @@ def _add_changed_vlans(old_port, new_port): if vlan.faucet_vips: changed_vlans_with_vips.append(vlan) if changed_vlans_with_vips: - logger.info('forcing cold start because %s has routing' % changed_vlans_with_vips) + logger.info(f'forcing cold start because {changed_vlans_with_vips} has routing') all_ports_changed = True return (all_ports_changed, deleted_ports, @@ -1495,7 +1486,7 @@ def get_config_changes(self, logger, new_dp): logger.info('DP routers config changed - requires cold start') elif not self.ignore_subconf( new_dp, ignore_keys=['interfaces', 'interface_ranges', 'routers']): - logger.info('DP config changed - requires cold start: %s' % self.conf_diff(new_dp)) + logger.info(f'DP config changed - requires cold start: {self.conf_diff(new_dp)}') else: changed_acls = self._get_acl_config_changes(logger, new_dp) deleted_vlans, changed_vlans = self._get_vlan_config_changes(logger, new_dp) From 574eff857ca8b78cc9a3091287be2faf7d29ae28 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 15:59:24 -0700 Subject: [PATCH 075/231] remove erroneous line --- faucet/dp.py | 1 - 1 file changed, 1 deletion(-) diff --git a/faucet/dp.py b/faucet/dp.py index fabf5c332c..52e03c31ae 100644 --- a/faucet/dp.py +++ b/faucet/dp.py @@ -1035,7 +1035,6 @@ def resolve_tunnel_objects(dst_dp_name, dst_port_name, tunnel_id_name): dst_port = dst_dp.resolve_port(dst_port_name) test_config_condition(dst_port is None, ( f'Could not find referenced destination port ({dst_port_name}) for tunnel ACL {acl_in}')) - dst_port_name, acl_in))) test_config_condition(dst_port.stack is None, ( f'destination port {dst_port_name} for tunnel ACL {acl_in} cannot be a stack port')) dst_port = dst_port.number From 589572c12d945b3921e21b6624f7c378bf6ae5b6 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 16:01:41 -0700 Subject: [PATCH 076/231] typo --- faucet/dp.py | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faucet/dp.py b/faucet/dp.py index 52e03c31ae..a0e02d667c 100644 --- a/faucet/dp.py +++ b/faucet/dp.py @@ -481,7 +481,7 @@ def _configure_tables(self): """Configure FAUCET pipeline with tables.""" valve_cl = SUPPORTED_HARDWARE.get(self.hardware, None) test_config_condition( - not valve_cl, f'hardware {self.hardare} must be in {list(SUPPORTED_HARDWARE)}') + not valve_cl, f'hardware {self.hardware} must be in {list(SUPPORTED_HARDWARE)}') if valve_cl is None: return From bf0eaa778edf28226b1335ac33f0079b7bc8fea8 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 16:08:32 -0700 Subject: [PATCH 077/231] move to f-strings --- clib/valve_test_lib.py | 2 +- faucet/faucet_bgp.py | 21 ++++++++++----------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/clib/valve_test_lib.py b/clib/valve_test_lib.py index 61adb00cb4..c2625b2ba0 100644 --- a/clib/valve_test_lib.py +++ b/clib/valve_test_lib.py @@ -1408,7 +1408,7 @@ def _verify_flood_to_port(match, port, valve_vlan, port_number=None): match, in_port, valve_vlan, ofp.OFPP_IN_PORT) self.assertEqual( in_port.hairpin, hairpin_output, - msg=f'hairpin flooding incorrect (expected {in_port.hairping} got {hairpin_output})') + msg=f'hairpin flooding incorrect (expected {in_port.hairpin} got {hairpin_output})') for port in valve_vlan.get_ports(): output = _verify_flood_to_port(match, port, valve_vlan) diff --git a/faucet/faucet_bgp.py b/faucet/faucet_bgp.py index 39c05f5a6c..4e7fbaf32b 100644 --- a/faucet/faucet_bgp.py +++ b/faucet/faucet_bgp.py @@ -34,8 +34,7 @@ def __init__(self, dp_id, vlan_vid, ipv): self.ipv = ipv def __str__(self): - return 'BGP speaker key DP ID: %u, VLAN VID: %u, IP version: %u' % ( - self.dp_id, self.vlan_vid, self.ipv) + return f'BGP speaker key DP ID: {self.dp_id}, VLAN VID: {self.vlan_vid}, IP version: {self.ipv}' def __repr__(self): return self.__str__() @@ -81,11 +80,11 @@ def _neighbor_states(bgp_speaker): 
@kill_on_exception(exc_logname) def _bgp_up_handler(self, remote_ip, remote_as): - self.logger.info('BGP peer router ID %s AS %s up' % (remote_ip, remote_as)) + self.logger.info(f'BGP peer router ID {remote_ip} AS {remote_as} up') @kill_on_exception(exc_logname) def _bgp_down_handler(self, remote_ip, remote_as): - self.logger.info('BGP peer router ID %s AS %s down' % (remote_ip, remote_as)) + self.logger.info(f'BGP peer router ID {remote_ip} AS {remote_as} down') # TODO: delete RIB routes for down peer. @kill_on_exception(exc_logname) @@ -101,20 +100,20 @@ def _bgp_route_handler(self, path_change, bgp_speaker_key): if vlan is None: return prefix = ipaddress.ip_network(str(path_change.prefix)) - route_str = 'BGP route %s' % prefix + route_str = f'BGP route {prefix}' if path_change.next_hop: nexthop = ipaddress.ip_address(str(path_change.next_hop)) - route_str = 'BGP route %s nexthop %s' % (prefix, nexthop) + route_str = f'BGP route {prefix} nexthop {nexthop}' if vlan.is_faucet_vip(nexthop): self.logger.error( - 'Skipping %s because nexthop cannot be us' % route_str) + f'Skipping {route_str} because nexthop cannot be us') return if valve.router_vlan_for_ip_gw(vlan, nexthop) is None: self.logger.info( - 'Skipping %s because nexthop not in %s' % (route_str, vlan)) + f'Skipping {route_str} because nexthop not in {vlan}') return if bgp_speaker_key not in self._dp_bgp_rib: @@ -179,18 +178,18 @@ def shutdown_bgp_speakers(self): def _add_bgp_speaker(self, valve, bgp_speaker_key, bgp_router): if bgp_speaker_key in self._dp_bgp_speakers: - self.logger.info('Skipping re/configuration of existing %s' % bgp_speaker_key) + self.logger.info(f'Skipping re/configuration of existing {bgp_speaker_key}') bgp_speaker = self._dp_bgp_speakers[bgp_speaker_key] if bgp_speaker_key in self._dp_bgp_rib: # Re-add routes (to avoid flapping BGP even when VLAN cold starts). 
for prefix, nexthop in self._dp_bgp_rib[bgp_speaker_key].items(): - self.logger.info('Re-adding %s via %s' % (prefix, nexthop)) + self.logger.info(f'Re-adding {prefix} via {nexthop}') bgp_vlan = bgp_router.bgp_vlan() flowmods = valve.add_route(bgp_vlan, nexthop, prefix) if flowmods: self._send_flow_msgs(valve, flowmods) else: - self.logger.info('Adding %s' % bgp_speaker_key) + self.logger.info(f'Adding {bgp_speaker_key}') bgp_speaker = self._create_bgp_speaker_for_vlan(bgp_speaker_key, bgp_router) return {bgp_speaker_key: bgp_speaker} From 77c212a37b46f75c13f2bfc63caf980a92e806fe Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 16:28:52 -0700 Subject: [PATCH 078/231] more f-string changes --- clib/valve_test_lib.py | 40 ++++++++++++++++++++-------------------- faucet/faucet_dot1x.py | 19 ++++++++----------- 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/clib/valve_test_lib.py b/clib/valve_test_lib.py index c2625b2ba0..4c6a750065 100644 --- a/clib/valve_test_lib.py +++ b/clib/valve_test_lib.py @@ -297,10 +297,10 @@ def serialize(layers): noauth_acl: noauth_acl """ + BASE_DP1_CONFIG -CONFIG = f""" +CONFIG = """ dps: s1: -{DP1_CONFIG} +%s interfaces: p1: number: 1 @@ -410,13 +410,13 @@ def serialize(layers): vid: 0x300 v400: vid: 0x400 -""" +""" % DP1_CONFIG -STACK_CONFIG = f""" +STACK_CONFIG = """ dps: s1: -{DP1_CONFIG} +%s stack: priority: 1 interfaces: @@ -472,12 +472,12 @@ def serialize(layers): vlans: v100: vid: 0x100 - """ + """ % DP1_CONFIG -STACK_LOOP_CONFIG = f""" +STACK_LOOP_CONFIG = """ dps: s1: -{BASE_DP1_CONFIG} +%s interfaces: 1: description: p1 @@ -493,7 +493,7 @@ def serialize(layers): description: p3 native_vlan: v100 s2: -{BASE_DP_CONFIG} +%s faucet_dp_mac: 0e:00:00:00:01:02 dp_id: 0x2 interfaces: @@ -511,7 +511,7 @@ def serialize(layers): description: p3 native_vlan: v100 s3: -{BASE_DP_CONFIG} +%s faucet_dp_mac: 0e:00:00:00:01:03 dp_id: 0x3 stack: @@ -533,7 +533,7 @@ def serialize(layers): vlans: v100: vid: 0x100 -""" 
+""" % (BASE_DP1_CONFIG, BASE_DP_CONFIG, BASE_DP_CONFIG) class ValveTestBases: @@ -2143,11 +2143,11 @@ def test_port_add_input(self): def test_dp_acl_deny(self): """Test DP acl denies forwarding""" - acl_config = f""" + acl_config = """ dps: s1: dp_acls: [drop_non_ospf_ipv4] -{DP1_CONFIG} +%s interfaces: p2: number: 2 @@ -2185,7 +2185,7 @@ def test_dp_acl_deny(self): rate: 1 } ] -""" +""" % DP1_CONFIG drop_match = { 'in_port': 2, @@ -2208,11 +2208,11 @@ def test_dp_acl_deny(self): def test_dp_acl_deny_ordered(self): """Test DP acl denies forwarding""" - acl_config = f""" + acl_config = """ dps: s1: dp_acls: [drop_non_ospf_ipv4] -{DP1_CONFIG} +%s interfaces: p2: number: 2 @@ -2250,7 +2250,7 @@ def test_dp_acl_deny_ordered(self): rate: 1 } ] -""" +""" % DP1_CONFIG drop_match = { 'in_port': 2, @@ -2273,10 +2273,10 @@ def test_dp_acl_deny_ordered(self): def test_port_acl_deny(self): """Test that port ACL denies forwarding.""" - acl_config = f""" + acl_config = """ dps: s1: -{DP1_CONFIG} +%s interfaces: p2: number: 2 @@ -2312,7 +2312,7 @@ def test_port_acl_deny(self): rate: 1 } ] -""" +""" % DP1_CONFIG drop_match = { 'in_port': 2, diff --git a/faucet/faucet_dot1x.py b/faucet/faucet_dot1x.py index 7be265f6dc..b774cdb43b 100644 --- a/faucet/faucet_dot1x.py +++ b/faucet/faucet_dot1x.py @@ -92,10 +92,10 @@ def _get_acls(self, datapath): # Loggin Methods def log_auth_event(self, valve, port_num, mac_str, status): """Log an authentication attempt event""" - self.metrics.inc_var('dp_dot1x_{}'.format(status), valve.dp.base_prom_labels()) - self.metrics.inc_var('port_dot1x_{}'.format(status), valve.dp.port_labels(port_num)) + self.metrics.inc_var(f'dp_dot1x_{status}', valve.dp.base_prom_labels()) + self.metrics.inc_var(f'port_dot1x_{status}', valve.dp.port_labels(port_num)) self.logger.info( - '{} from MAC {} on {}'.format(status.capitalize(), mac_str, port_num)) + f'{status.capitalize()} from MAC {mac_str} on {port_num}') valve.dot1x_event({'AUTHENTICATION': {'dp_id': 
valve.dp.dp_id, 'port': port_num, 'eth_src': mac_str, @@ -314,8 +314,7 @@ def reset(self, valves): for dot1x_port in valve.dp.dot1x_ports(): self.set_mac_str(valve, valve_index, dot1x_port.number) self.logger.info( - 'dot1x enabled on %s (%s) port %s, NFV interface %s' % ( - valve.dp, valve_index, dot1x_port, dot1x_intf)) + f'dot1x enabled on {valve.dp} ({valve_index}) port {dot1x_port}, NFV interface {dot1x_intf}') valve.dot1x_event({'ENABLED': {'dp_id': valve.dp.dp_id}}) @@ -347,15 +346,13 @@ def _add_authenticated_flowmod(self, dot1x_port, valve, # pylint: disable=too-m acl = valve.dp.acls.get(acl_name, None) if dot1x_port.dot1x_dyn_acl and acl: - self.logger.info("DOT1X_DYN_ACL: Adding ACL '{0}' for port '{1}'".format( - acl_name, port_num)) - self.logger.debug("DOT1X_DYN_ACL: ACL contents: '{0}'".format(str(acl.__dict__))) + self.logger.info(f"DOT1X_DYN_ACL: Adding ACL '{acl_name}' for port '{port_num}'") + self.logger.debug(f"DOT1X_DYN_ACL: ACL contents: '{str(acl.__dict__)}'") flowmods.extend(acl_manager.add_port_acl(acl, port_num, mac_str)) elif dot1x_port.dot1x_acl: auth_acl, _ = self._get_acls(valve.dp) - self.logger.info("DOT1X_PRE_ACL: Adding ACL '{0}' for port '{1}'".format( - acl_name, port_num)) - self.logger.debug("DOT1X_PRE_ACL: ACL contents: '{0}'".format(str(auth_acl.__dict__))) + self.logger.info(f"DOT1X_PRE_ACL: Adding ACL '{acl_name}' for port '{port_num}'") + self.logger.debug(f"DOT1X_PRE_ACL: ACL contents: '{str(auth_acl.__dict__)}'") flowmods.extend(acl_manager.add_port_acl(auth_acl, port_num, mac_str)) else: flowmods.extend(acl_manager.add_authed_mac(port_num, mac_str)) From 78598dcd0ef3711afd969b71d4ed8628c4e04f8a Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 17:01:42 -0700 Subject: [PATCH 079/231] typo --- faucet/dp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faucet/dp.py b/faucet/dp.py index a0e02d667c..1611682847 100644 --- a/faucet/dp.py +++ b/faucet/dp.py @@ -1279,7 +1279,7 @@ def 
_get_conf_changes(logger, conf_name, subconf, new_subconf, diff=False, ignor if added_confs: logger.info(f'{conf_name}s added: {added_confs}') if changed_confs: - logger.info(f'{conf_name}s changed: {changed_congs}') + logger.info(f'{conf_name}s changed: {changed_confs}') else: logger.info(f'no {conf_name} changes') From 34d285da9ea449d35e6792ab3dfe8f23a66c2e60 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 17:05:01 -0700 Subject: [PATCH 080/231] move to f-strings for faucet pipeline --- faucet/faucet_pipeline.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/faucet/faucet_pipeline.py b/faucet/faucet_pipeline.py index 37f396704d..c0dc30fb23 100644 --- a/faucet/faucet_pipeline.py +++ b/faucet/faucet_pipeline.py @@ -49,10 +49,10 @@ def __init__(self, name, table_id, # pylint: disable=too-many-arguments def __str__(self): field_strs = ' '.join([ - '%s: %s' % (key, val) + f'{key}: {val}' for key, val in sorted(self.__dict__.items()) if val]) - return 'table config %s' % field_strs + return f'table config {field_strs}' def __repr__(self): return self.__str__() @@ -73,9 +73,9 @@ def __lt__(self, other): def _fib_table(ipv, table_id): return ValveTableConfig( - 'ipv%u_fib' % ipv, + f'ipv{ipv}_fib', table_id, - match_types=(('eth_type', False), ('ipv%u_dst' % ipv, True), ('vlan_vid', False)), + match_types=(('eth_type', False), (f'ipv{ipv}_dst', True), ('vlan_vid', False)), set_fields=('eth_dst', 'eth_src', 'vlan_vid'), dec_ttl=True, vlan_port_scale=3.1, From cb39198e7d5f4b5f63862085c3225e11580346d2 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 17:10:02 -0700 Subject: [PATCH 081/231] move to f-strings for gauge influx --- faucet/gauge_influx.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/faucet/gauge_influx.py b/faucet/gauge_influx.py index 91054f0ecd..8b04d5b8df 100644 --- a/faucet/gauge_influx.py +++ b/faucet/gauge_influx.py @@ -47,13 +47,13 @@ def ship_points(self, points): if 
client.write_points(points=points, time_precision='s'): return True self.logger.warning( - '%s failed to update InfluxDB' % self.ship_error_prefix) + f'{self.ship_error_prefix} failed to update InfluxDB') else: self.logger.warning( - '%s error connecting to InfluxDB' % self.ship_error_prefix) + f'{self.ship_error_prefix} error connecting to InfluxDB') except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout, InfluxDBClientError, InfluxDBServerError) as err: - self.logger.warning('%s %s' % (self.ship_error_prefix, err)) + self.logger.warning(f'{self.ship_error_prefix} {err}') return False @staticmethod From 9cc14339ff9f8d2e51cdddb314576b31a7df21c8 Mon Sep 17 00:00:00 2001 From: cglewis Date: Fri, 17 Sep 2021 17:30:47 -0700 Subject: [PATCH 082/231] more pylint cleanup --- clib/mininet_test_base.py | 2 +- faucet/port.py | 26 ++++++++++++-------------- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index b74e86570a..a67fb7f0a2 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -2468,7 +2468,7 @@ def add_host_ipv6_address(self, host, ip_v6, intf=None): def add_host_route(self, host, ip_dst, ip_gw): """Add an IP route to a Mininet host.""" host.cmd(f'ip -{ip_dst.version} route del {ip_dst.network.with_prefixlen}') - add_cmd = f'ip -{ip_dst.version} route add {ip_dst.network.with_prefixle} via {ip_gw}' + add_cmd = f'ip -{ip_dst.version} route add {ip_dst.network.with_prefixlen} via {ip_gw}' self.quiet_commands(host, (add_cmd,)) def _ip_ping(self, host, dst, retries, timeout=500, diff --git a/faucet/port.py b/faucet/port.py index 3060cd736a..c6196544fb 100644 --- a/faucet/port.py +++ b/faucet/port.py @@ -290,7 +290,7 @@ def __init__(self, _id, dp_id, conf=None): self.mirror = [self.mirror] def __str__(self): - return 'Port %u' % self.number + return f'Port {self.number}' def __repr__(self): return self.__str__() @@ -308,7 +308,7 @@ def clone_dyn_state(self, 
prev_port): def stack_descr(self): """"Return stacking annotation if this is a stacking port.""" if self.stack: - return 'remote DP %s %s' % (self.stack['dp'].name, self.stack['port']) + return f'remote DP {self.stack["dp"].name} {self.stack["port"]}' return '' def set_defaults(self): @@ -321,27 +321,26 @@ def set_defaults(self): def check_config(self): super().check_config() test_config_condition(not (isinstance(self.number, int) and self.number > 0 and ( - not valve_of.ignore_port(self.number))), ('Port number invalid: %s' % self.number)) + not valve_of.ignore_port(self.number))), (f'Port number invalid: {self.number}')) non_vlan_options = {'stack', 'mirror', 'coprocessor', 'output_only'} vlan_agnostic_options = {'enabled', 'number', 'name', 'description', 'max_lldp_lost'} vlan_port = self.tagged_vlans or self.native_vlan non_vlan_port_options = {option for option in non_vlan_options if getattr(self, option)} test_config_condition( vlan_port and non_vlan_port_options, - 'cannot have VLANs configured on non-VLAN ports: %s' % self) + f'cannot have VLANs configured on non-VLAN ports: {self}') if self.output_only: test_config_condition( not non_vlan_port_options.issubset({'mirror', 'output_only'}), - 'output_only can only coexist with mirror option on same port %s' % self) + f'output_only can only coexist with mirror option on same port {self}') elif self.mirror: test_config_condition( not non_vlan_port_options.issubset({'mirror', 'coprocessor'}), - 'coprocessor can only coexist with mirror option on same port %s' % self) + f'coprocessor can only coexist with mirror option on same port {self}') else: test_config_condition( len(non_vlan_port_options) > 1, - 'cannot have multiple non-VLAN port options %s on same port: %s' % ( - non_vlan_port_options, self)) + f'cannot have multiple non-VLAN port options {non_vlan_port_options} on same port: {self}') if non_vlan_port_options: for key, default_val in self.defaults.items(): if key in vlan_agnostic_options or key in 
non_vlan_port_options: @@ -351,7 +350,7 @@ def check_config(self): val = getattr(self, key) test_config_condition( val != default_val and val, - 'Cannot have VLAN option %s: %s on non-VLAN port %s' % (key, val, self)) + f'Cannot have VLAN option {key}: {val} on non-VLAN port {self}') test_config_condition( self.hairpin and self.hairpin_unicast, 'Cannot have both hairpin and hairpin_unicast enabled') @@ -360,7 +359,7 @@ def check_config(self): if dot1x_feature.startswith('dot1x_') and dot1x_config} test_config_condition( dot1x_features and not self.dot1x, - '802.1x features %s require port to have dot1x enabled' % dot1x_features) + f'802.1x features {dot1x_features} require port to have dot1x enabled') if self.dot1x: test_config_condition(self.number > 65535, ( '802.1x not supported on ports > 65535')) @@ -380,7 +379,7 @@ def check_config(self): self._check_conf_types(self.stack, self.stack_defaults_types) for stack_config in list(self.stack_defaults_types.keys()): test_config_condition(stack_config not in self.stack, ( - 'stack %s must be defined' % stack_config)) + f'stack {stack_config} must be defined')) # LLDP always enabled for stack ports. 
self.receive_lldp = True if not self.lldp_beacon_enabled(): @@ -395,7 +394,7 @@ def check_config(self): ('lacp port priority must be at least 0 and less than 256')) if self.lldp_peer_mac: test_config_condition(not netaddr.valid_mac(self.lldp_peer_mac), ( - 'invalid MAC address %s' % self.lldp_peer_mac)) + f'invalid MAC address {self.lldp_peer_mac}')) if self.lldp_beacon: self._check_conf_types( self.lldp_beacon, self.lldp_beacon_defaults_types) @@ -713,8 +712,7 @@ def stack_port_update(self, now): if stack_timed_out: # Stack timed out, too many packets lost self.stack_gone() - reason = 'too many (%u) packets lost, last received %us ago' % ( - num_lost_lldp, time_since_lldp_seen) + reason = f'too many ({num_lost_lldp}) packets lost, last received {time_since_lldp_seen}s ago' elif not stack_correct: # Stack bad due to incorrect cabling self.stack_bad() From 51110882a0481e1f71ebc8aa241aabedfea0430c Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 19 Sep 2021 22:21:57 +0000 Subject: [PATCH 083/231] No 3.6. 
--- .github/workflows/tests-unit.yml | 2 +- setup.cfg | 1 - setup.py | 8 ++++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 2c62fe3d25..6a61de487c 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.6, 3.7, 3.8, 3.9] + python-version: [3.7, 3.8, 3.9] steps: - name: Checkout repo uses: actions/checkout@v2 diff --git a/setup.cfg b/setup.cfg index 02dd9964df..9c1fe91699 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,7 +12,6 @@ classifier = Topic :: System :: Networking Natural Language :: English Programming Language :: Python - Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 diff --git a/setup.py b/setup.py index e9a6f96a48..39ce7c91fc 100755 --- a/setup.py +++ b/setup.py @@ -15,15 +15,15 @@ if sys.version_info < (3,): print("""You are trying to install faucet on python {py} -Faucet is not compatible with python 2, please upgrade to python 3.5 or newer.""" +Faucet is not compatible with python 2, please upgrade to python 3.7 or newer.""" .format(py='.'.join([str(v) for v in sys.version_info[:3]])), file=sys.stderr) sys.exit(1) -elif sys.version_info < (3, 5): +elif sys.version_info < (3, 7): print("""You are trying to install faucet on python {py} Faucet 1.9.0 and above are no longer compatible with older versions of python 3. 
-Please upgrade to python 3.5 or newer.""" +Please upgrade to python 3.7 or newer.""" .format(py='.'.join([str(v) for v in sys.version_info[:3]]))) sys.exit(1) @@ -90,7 +90,7 @@ def setup_faucet_log(): setup( name='faucet', setup_requires=['pbr>=1.9', 'setuptools>=17.1'], - python_requires='>=3.5', + python_requires='>=3.7', pbr=True ) From 2b1d2be18b514f7f6d151c9fd3a3e5f0fe324d62 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 10:33:36 -0700 Subject: [PATCH 084/231] add back f-strings check for pylint --- .pylintrc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.pylintrc b/.pylintrc index 23c405180a..a9ee9e1ac3 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,9 +1,7 @@ [MASTER] -# TODO: remove consider-using-f-string when python3.5 is gone. disable= fixme, - import-error, - consider-using-f-string + import-error [FORMAT] max-line-length=120 From d1249bd5c8ea33d56af4ca06b03b11d4bca65973 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 10:58:05 -0700 Subject: [PATCH 085/231] more f-string fixes for pylint --- clib/valve_test_lib.py | 16 +++++++++------- faucet/valve_lldp.py | 19 ++++++------------- 2 files changed, 15 insertions(+), 20 deletions(-) diff --git a/clib/valve_test_lib.py b/clib/valve_test_lib.py index 4c6a750065..5ebf3cd244 100644 --- a/clib/valve_test_lib.py +++ b/clib/valve_test_lib.py @@ -2605,18 +2605,20 @@ def base_config(self): def create_config(self): """Create the config file""" - self.CONFIG = f""" + self.CONFIG = """ vlans: vlan100: vid: 0x100 - faucet_mac: '{self.VLAN100_FAUCET_MAC}' - faucet_vips: ['{self.VLAN100_FAUCET_VIP_SPACE}'] + faucet_mac: '%s' + faucet_vips: ['%s'] vlan200: vid: 0x200 - faucet_mac: '{self.VLAN200_FAUCET_MAC}' - faucet_vips: ['{self.VLAN200_FAUCET_VIP_SPACE}'] - {self.base_config()} - """ + faucet_mac: '%s' + faucet_vips: ['%s'] + %s + """ % (self.VLAN100_FAUCET_MAC, self.VLAN100_FAUCET_VIP_SPACE, + self.VLAN200_FAUCET_MAC, self.VLAN200_FAUCET_VIP_SPACE, + 
self.base_config()) def setup_stack_routing(self): """Create a stacking config file.""" diff --git a/faucet/valve_lldp.py b/faucet/valve_lldp.py index a88cdd62a7..e5886dca8d 100644 --- a/faucet/valve_lldp.py +++ b/faucet/valve_lldp.py @@ -91,14 +91,9 @@ def verify_lldp(self, port, now, valve, other_valves, or remote_dp_name != remote_dp.name or remote_port_id != remote_port.number): self.logger.error( - 'Stack %s cabling incorrect, expected %s:%s:%u, actual %s:%s:%u' % ( - port, - valve_util.dpid_log(remote_dp.dp_id), - remote_dp.name, - remote_port.number, - valve_util.dpid_log(remote_dp_id), - remote_dp_name, - remote_port_id)) + f'Stack {port} cabling incorrect, expected ' \ + f'{valve_util.dpid_log(remote_dp.dp_id)}:{remote_dp.name}:{remote_port.number}, ' \ + f'actual {valve_util.dpid_log(remote_dp_id)}:{remote_dp_name}:{remote_port_id}') stack_correct = False self._inc_var('stack_cabling_errors') port.dyn_stack_probe_info = { @@ -138,9 +133,8 @@ def update_stack_link_state(self, ports, now, valve, other_valves): self.notify({'STACK_STATE': { 'port': port.number, 'state': after_state}}) - self.logger.info('Stack %s state %s (previous state %s): %s' % ( - port, port.stack_state_name(after_state), - port.stack_state_name(before_state), reason)) + self.logger.info(f'Stack {port} state {port.stack_state_name(after_state)} ' \ + f'(previous state {port.stack_state_name(before_state)}): {reason}') stack_changes += 1 port_up = False if port.is_stack_up(): @@ -150,8 +144,7 @@ def update_stack_link_state(self, ports, now, valve, other_valves): for stack_valve in stacked_valves: stack_valve.stack_manager.update_stack_topo(port_up, valve.dp, port) if stack_changes or valve.stale_root: - self.logger.info('%u stack ports changed state, stale root %s' % - (stack_changes, valve.stale_root)) + self.logger.info(f'{stack_changes} stack ports changed state, stale root {valve.stale_root}') valve.stale_root = False notify_dps = {} for stack_valve in stacked_valves: From 
b5d75915a2b860d6438d78c1d66fa736b2525d06 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 11:06:55 -0700 Subject: [PATCH 086/231] move to f-strings for faucet stack --- faucet/stack.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/faucet/stack.py b/faucet/stack.py index 768c948e88..020d900e09 100644 --- a/faucet/stack.py +++ b/faucet/stack.py @@ -188,11 +188,11 @@ def update_health(self, now, dp_last_live_time, update_time): if not timeout_healthy: # Too long since DP last running, if DP not running then # number of UP stack or LACP ports should be 0 - reason += 'last running %us ago (timeout %us)' % (now - last_live_time, health_timeout) + reason += f'last running {now - last_live_time}s ago (timeout {health_timeout}s)' self.dyn_healthy_info = (False, 0.0, 0.0) self.dyn_healthy = False return self.dyn_healthy, reason - reason += 'running %us ago' % (now - last_live_time) + reason += f'running {now - last_live_time}s ago' if reason: reason += ', ' stack_ports_healthy, stack_percentage = self.stack_port_healthy() @@ -258,8 +258,7 @@ def resolve_topology(self, dps, meta_dp_state): for dp in stack_priority_dps: test_config_condition(not isinstance(dp.stack.priority, int), ( - 'stack priority must be type %s not %s' % ( - int, type(dp.stack.priority)))) + f'stack priority must be type {int} not {type(dp.stack.priority)}')) test_config_condition(dp.stack.priority <= 0, ( 'stack priority must be > 0')) @@ -285,13 +284,13 @@ def resolve_topology(self, dps, meta_dp_state): edge_name = Stack.modify_topology(graph, dp, port) edge_count[edge_name] += 1 for edge_name, count in edge_count.items(): - test_config_condition(count != 2, '%s defined only in one direction' % edge_name) + test_config_condition(count != 2, f'{edge_name} defined only in one direction') if graph.size() and self.name in graph: self.graph = graph for dp in graph.nodes(): path_to_root_len = len(self.shortest_path(self.root_name, src_dp=dp)) 
test_config_condition( - path_to_root_len == 0, '%s not connected to stack' % dp) + path_to_root_len == 0, f'{dp} not connected to stack') root_len = self.longest_path_to_root_len() if root_len is not None and root_len > 2: self.root_flood_reflection = True @@ -314,9 +313,7 @@ def canonical_edge(dp, port): def make_edge_name(edge_a, edge_z): edge_a_dp, edge_a_port = edge_a edge_z_dp, edge_z_port = edge_z - return '%s:%s-%s:%s' % ( - edge_a_dp.name, edge_a_port.name, - edge_z_dp.name, edge_z_port.name) + return f'{edge_a_dp.name}:{edge_a_port.name}-{edge_z_dp.name}:{edge_z_port.name}' def make_edge_attr(edge_a, edge_z): edge_a_dp, edge_a_port = edge_a From 972e08b4a1cf2bce4b897173b7b9ae17ce65fc91 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 11:40:19 -0700 Subject: [PATCH 087/231] more f-strings for pylint --- clib/mininet_test_base.py | 2 +- faucet/valve.py | 101 +++++++++++++++++--------------------- 2 files changed, 46 insertions(+), 57 deletions(-) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index a67fb7f0a2..f521d18cb6 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -1815,7 +1815,7 @@ def verify_unicast(self, hosts, unicast_expected=True, packets=3): def verify_empty_caps(self, cap_files): cap_file_cmds = [ - f'tcpdump -n -v -A -r {cap_file for cap_file in cap_files} 2> /dev/null'] + 'tcpdump -n -v -A -r %s 2> /dev/null' % cap_file for cap_file in cap_files] self.quiet_commands(self.net.controllers[0], cap_file_cmds) def verify_no_bcast_to_self(self, timeout=3): diff --git a/faucet/valve.py b/faucet/valve.py index 65962bb074..9d978a9b08 100644 --- a/faucet/valve.py +++ b/faucet/valve.py @@ -253,11 +253,11 @@ def dp_init(self, new_dp=None, valves=None): for ipv, route_manager_class, neighbor_timeout in ( (4, valve_route.ValveIPv4RouteManager, self.dp.arp_neighbor_timeout), (6, valve_route.ValveIPv6RouteManager, self.dp.nd_neighbor_timeout)): - fib_table_name = 'ipv%u_fib' % ipv + fib_table_name = 
f'ipv{ipv}_fib' if fib_table_name not in self.dp.tables: continue fib_table = self.dp.tables[fib_table_name] - proactive_learn = getattr(self.dp, 'proactive_learn_v%u' % ipv) + proactive_learn = getattr(self.dp, f'proactive_learn_v{ipv}') route_manager = route_manager_class( self.logger, self.notify, self.dp.global_vlan, neighbor_timeout, self.dp.max_hosts_per_resolve_cycle, @@ -270,8 +270,7 @@ def dp_init(self, new_dp=None, valves=None): if vlan.faucet_vips_by_ipv(route_manager.IPV): route_manager.active = True vips_str = list(str(vip) for vip in vlan.faucet_vips_by_ipv(route_manager.IPV)) - self.logger.info('IPv%u routing is active on %s with VIPs %s' % ( - route_manager.IPV, vlan, vips_str)) + self.logger.info(f'IPv{route_manager.IPV} routing is active on {vlan} with VIPs {vips_str}') for eth_type in route_manager.CONTROL_ETH_TYPES: self._route_manager_by_eth_type[eth_type] = route_manager self._managers = tuple( @@ -313,8 +312,7 @@ def ofchannel_log(self, ofmsgs): self.dp.ofchannel_log, logging.DEBUG, 0) - log_prefix = '%u %s' % ( - len(ofmsgs), valve_util.dpid_log(self.dp.dp_id)) + log_prefix = f'{len(ofmsgs)} {valve_util.dpid_log(self.dp.dp_id)}' for i, ofmsg in enumerate(ofmsgs, start=1): self.ofchannel_logger.debug( '%u/%s %s', i, log_prefix, ofmsg) @@ -382,7 +380,7 @@ def _add_default_flows(self): def add_vlan(self, vlan, cold_start=False): """Configure a VLAN.""" - self.logger.info('Configuring %s' % vlan) + self.logger.info(f'Configuring {vlan}') ofmsgs = [] if vlan.reserved_internal_vlan: return ofmsgs @@ -398,7 +396,7 @@ def add_vlans(self, vlans, cold_start=False): def del_vlan(self, vlan): """Delete a configured VLAN.""" - self.logger.info('Delete VLAN %s' % vlan) + self.logger.info(f'Delete VLAN {vlan}') ofmsgs = [] for manager in self._managers: ofmsgs.extend(manager.del_vlan(vlan)) @@ -511,8 +509,7 @@ def port_desc_stats_reply_handler(self, port_desc_stats, _other_valves, now): def _fabricate(port_no, reason, status): self.logger.info( - 'Port %s 
fabricating %s status %s' % - (port_no, Valve._decode_port_status(reason), status)) + f'Port {port_no} fabricating {Valve._decode_port_status(reason)} status {status}') _ofmsgs_by_valve = self.port_status_handler( port_no, reason, 0 if status else valve_of.ofp.OFPPS_LINK_DOWN, @@ -535,12 +532,10 @@ def _fabricate(port_no, reason, status): if conf_port_nos != curr_dyn_port_nos: self.logger.info( - 'delta in known ports: conf %s dyn %s' % - (conf_port_nos, curr_dyn_port_nos)) + f'delta in known ports: conf {conf_port_nos} dyn {curr_dyn_port_nos}') if prev_dyn_up_port_nos != curr_dyn_up_port_nos: self.logger.info( - 'delta in up state: %s => %s' % - (prev_dyn_up_port_nos, curr_dyn_up_port_nos)) + f'delta in up state: {prev_dyn_up_port_nos} => {curr_dyn_up_port_nos}') # Ports we have no config for for port_no in no_conf_port_nos: @@ -587,8 +582,7 @@ def port_status_handler(self, port_no, reason, state, _other_valves, now): if not port.opstatus_reconf: return {} if reason not in Valve._port_status_codes: - self.logger.warning('Unhandled port status %s/state %s for %s' % ( - reason, state, port)) + self.logger.warning(f'Unhandled port status {reason}/state {state} for {port}') return {} ofmsgs_by_valve = {self: []} @@ -599,25 +593,24 @@ def port_status_handler(self, port_no, reason, state, _other_valves, now): (state & valve_of.ofp.OFPPS_BLOCKED) or (state & valve_of.ofp.OFPPS_LINK_DOWN)) live_state = state & valve_of.ofp.OFPPS_LIVE decoded_reason = Valve._decode_port_status(reason) - state_description = '%s up status %s reason %s state %s' % ( - port, port_status, decoded_reason, state) + state_description = f'{port} up status {port_status} reason {decoded_reason} state {state}' ofmsgs = [] if new_port_status != port.dyn_phys_up: - self.logger.info('status change: %s' % state_description) + self.logger.info(f'status change: {state_description}') if new_port_status: ofmsgs = self.port_add(port_no) else: ofmsgs = self.port_delete(port_no, keep_cache=True, 
other_valves=_other_valves) else: - self.logger.info('status did not change: %s' % state_description) + self.logger.info(f'status did not change: {state_description}') if new_port_status: if blocked_down_state: self.logger.info( - '%s state down or blocked despite status up, setting to status down' % port) + f'{port} state down or blocked despite status up, setting to status down') ofmsgs = self.port_delete(port_no, keep_cache=True, other_valves=_other_valves) if not live_state: self.logger.info( - '%s state OFPPS_LIVE reset, ignoring in expectation of port down' % port) + f'{port} state OFPPS_LIVE reset, ignoring in expectation of port down') ofmsgs_by_valve[self].extend(ofmsgs) return ofmsgs_by_valve @@ -692,7 +685,7 @@ def fast_state_expire(self, now, other_valves): if port.dyn_lldp_beacon_recv_state: age = now - port.dyn_lldp_beacon_recv_time if age > self.dp.lldp_beacon['send_interval'] * port.max_lldp_lost: - self.logger.info('LLDP for %s inactive after %us' % (port, age)) + self.logger.info(f'LLDP for {port} inactive after {age}s') port.dyn_lldp_beacon_recv_state = None return self._lldp_manager.update_stack_link_state( self.dp.stack_ports(), now, self, other_valves) @@ -764,7 +757,7 @@ def ports_add(self, port_nums, cold_start=False, log_msg='up'): continue port = self.dp.ports[port_num] port.dyn_phys_up = True - self.logger.info('%s (%s) %s' % (port, port.description, log_msg)) + self.logger.info(f'{port} ({port.description}) {log_msg}') if not port.running(): continue @@ -813,7 +806,7 @@ def ports_delete(self, port_nums, log_msg='down', keep_cache=False, continue port = self.dp.ports[port_num] port.dyn_phys_up = False - self.logger.info('%s (%s) %s' % (port, port.description, log_msg)) + self.logger.info(f'{port} ({port.description}) {log_msg}') # now is set to a time value only when ports_delete is called to flush if now: @@ -904,25 +897,25 @@ def lldp_handler(self, now, pkt_meta, other_valves): if port.dyn_lldp_beacon_recv_state != remote_port_state: 
chassis_id = str(self.dp.faucet_dp_mac) if remote_port_state: - self.logger.info('LLDP on %s, %s from %s (remote %s, port %u) state %s' % ( - chassis_id, port, pkt_meta.eth_src, valve_util.dpid_log(remote_dp_id), - remote_port_id, port.stack_state_name(remote_port_state))) + self.logger.info(f'LLDP on {chassis_id}, {port} from {pkt_meta.eth_src} ' \ + f'(remote {valve_util.dpid_log(remote_dp_id)}, port {remote_port_id})' \ + f' state {port.stack_state_name(remote_port_state)}') port.dyn_lldp_beacon_recv_state = remote_port_state peer_mac_src = self.dp.ports[port.number].lldp_peer_mac if peer_mac_src and peer_mac_src != pkt_meta.eth_src: - self.logger.warning('Unexpected LLDP peer. Received pkt from %s instead of %s' % ( - pkt_meta.eth_src, peer_mac_src)) + self.logger.warning(f'Unexpected LLDP peer. Received pkt from {pkt_meta.eth_src} ' \ + f'instead of {peer_mac_src}') ofmsgs_by_valve = {} if remote_dp_id and remote_port_id: - self.logger.debug('FAUCET LLDP on %s from %s (remote %s, port %u)' % ( - port, pkt_meta.eth_src, valve_util.dpid_log(remote_dp_id), remote_port_id)) + self.logger.debug(f'FAUCET LLDP on {port} from {pkt_meta.eth_src} ' \ + f'(remote {valve_util.dpid_log(remote_dp_id)}, port {remote_port_id})') ofmsgs_by_valve.update(self._lldp_manager.verify_lldp( port, now, self, other_valves, remote_dp_id, remote_dp_name, remote_port_id, remote_port_state)) else: - self.logger.debug('LLDP on %s from %s: %s' % (port, pkt_meta.eth_src, str(lldp_pkt))) + self.logger.debug(f'LLDP on {port} from {pkt_meta.eth_src}: {str(lldp_pkt)}') return ofmsgs_by_valve @staticmethod @@ -977,19 +970,19 @@ def learn_host(self, now, pkt_meta, other_valves): pkt_meta.vlan.add_cache_host(pkt_meta.eth_src, learn_port, now) if pkt_meta.l3_pkt is None: pkt_meta.reparse_ip() - learn_log = 'L2 learned on %s %s (%u hosts total)' % ( - learn_port, pkt_meta.log(), pkt_meta.vlan.hosts_count()) + learn_log = f'L2 learned on {learn_port} {pkt_meta.log()} ' \ + 
f'({pkt_meta.vlan.hosts_count()} hosts total)' stack_descr = None if pkt_meta.port.stack: stack_descr = pkt_meta.port.stack_descr() - learn_log += ' from %s' % stack_descr + learn_log += f' from {stack_descr}' previous_port_no = None if previous_port is not None: previous_port_no = previous_port.number if pkt_meta.port.number != previous_port_no: - learn_log += ', moved from %s' % previous_port + learn_log += f', moved from {previous_port}' if previous_port.stack: - learn_log += ' from %s' % previous_port.stack_descr() + learn_log += f' from {previous_port.stack_descr()}' self.logger.info(learn_log) learn_labels = dict(self.dp.base_prom_labels(), vid=pkt_meta.vlan.vid, eth_src=pkt_meta.eth_src) @@ -1045,7 +1038,7 @@ def parse_pkt_meta(self, msg): if not self.dp.dyn_running: return None if self.dp.strict_packet_in_cookie and self.dp.cookie != msg.cookie: - self.logger.info('got packet in with unknown cookie %s' % msg.cookie) + self.logger.info(f'got packet in with unknown cookie {msg.cookie}') return None # Drop any packet we didn't specifically ask for if msg.reason != valve_of.ofp.OFPR_ACTION: @@ -1066,34 +1059,31 @@ def parse_pkt_meta(self, msg): data, max_len=valve_packet.ETH_VLAN_HEADER_SIZE) if pkt is None or eth_pkt is None: self.logger.info( - 'unparseable packet from port %u' % in_port) + f'unparseable packet from port {in_port}') return None if (vlan_vid is not None and vlan_vid not in self.dp.vlans and vlan_vid != self.dp.global_vlan): self.logger.info( - 'packet for unknown VLAN %u' % vlan_vid) + f'packet for unknown VLAN {vlan_vid}') return None pkt_meta = self.parse_rcv_packet( in_port, vlan_vid, eth_type, data, msg.total_len, pkt, eth_pkt, vlan_pkt) if not valve_packet.mac_addr_is_unicast(pkt_meta.eth_src): self.logger.info( - 'packet with non-unicast eth_src %s port %u' % ( - pkt_meta.eth_src, in_port)) + f'packet with non-unicast eth_src {pkt_meta.eth_src} port {in_port}') return None if valve_packet.mac_addr_all_zeros(pkt_meta.eth_src): 
self.logger.info( - 'packet with all zeros eth_src %s port %u' % ( - pkt_meta.eth_src, in_port)) + f'packet with all zeros eth_src {pkt_meta.eth_src} port {in_port}') return None if self.dp.stack and self.dp.stack.graph: if (not pkt_meta.port.stack and pkt_meta.vlan and pkt_meta.vlan not in pkt_meta.port.tagged_vlans and pkt_meta.vlan != pkt_meta.port.native_vlan): - self.logger.warning( - ('packet from non-stack port number %u is not member of VLAN %u' % ( - pkt_meta.port.number, pkt_meta.vlan.vid))) + self.logger.warning(f'packet from non-stack port number ' \ + f'{pkt_meta.port.number} is not member of VLAN {pkt_meta.vlan.vid}') return None return pkt_meta @@ -1268,7 +1258,7 @@ def _lacp_state_expire(self, now, _other_valves): for port in ports_up: lacp_age = now - port.dyn_lacp_updated_time if lacp_age > self.dp.lacp_timeout: - self.logger.info('LAG %s %s expired (age %u)' % (lag, port, lacp_age)) + self.logger.info(f'LAG {lag} {port} expired (age {lacp_age})') ofmsgs_by_valve[self].extend(self.lacp_update( port, False, now=now, other_valves=_other_valves)) return ofmsgs_by_valve @@ -1312,7 +1302,7 @@ def _pipeline_diff(self, new_dp): new_pipeline = new_dp.pipeline_str().splitlines() differ = difflib.Differ() diff = '\n'.join(differ.compare(old_pipeline, new_pipeline)) - self.logger.info('pipeline change: %s' % diff) + self.logger.info(f'pipeline change: {diff}') def _pipeline_change(self, new_dp): if new_dp: @@ -1322,8 +1312,7 @@ def _pipeline_change(self, new_dp): old_table_ids = self.dp.pipeline_tableids() new_table_ids = new_dp.pipeline_tableids() if old_table_ids != new_table_ids: - self.logger.info('table IDs changed, old %s new %s' % - (old_table_ids, new_table_ids)) + self.logger.info(f'table IDs changed, old {old_table_ids} new {new_table_ids}') return True return False @@ -1448,8 +1437,8 @@ def reload_config(self, _now, new_dp, valves=None): restart_type, ofmsgs = self._apply_config_changes( new_dp, self.dp.get_config_changes(self.logger, new_dp), 
valves) if restart_type is not None: - self._inc_var('faucet_config_reload_%s' % restart_type) - self.logger.info('%s starting' % restart_type) + self._inc_var(f'faucet_config_reload_{restart_type}') + self.logger.info(f'{restart_type} starting') if restart_type == 'cold': self.logger.info('forcing DP reconnection to ensure ports are synchronized') ofmsgs = None @@ -1525,7 +1514,7 @@ def oferror(self, msg): orig_msgs = [orig_msg for orig_msg in self.recent_ofmsgs if orig_msg.xid == msg.xid] error_txt = msg if orig_msgs: - error_txt = '%s caused by %s' % (error_txt, orig_msgs[0]) + error_txt = f'{error_txt} caused by {orig_msgs[0]}' error_type = 'UNKNOWN' error_code = 'UNKNOWN' try: @@ -1552,7 +1541,7 @@ def oferror(self, msg): # Same scenario as groups. return self._inc_var('of_errors') - self.logger.error('OFError type: %s code: %s %s' % (error_type, error_code, error_txt)) + self.logger.error(f'OFError type: {error_type} code: {error_code} {error_text}') def prepare_send_flows(self, flow_msgs): """Prepare to send flows to datapath. 
From b68bffeb0b8c4a3335b24602ac02ba02da98cb98 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 11:46:48 -0700 Subject: [PATCH 088/231] move to f-strings for valve packet --- faucet/valve_packet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faucet/valve_packet.py b/faucet/valve_packet.py index f559c2ed95..853deda9b4 100644 --- a/faucet/valve_packet.py +++ b/faucet/valve_packet.py @@ -760,7 +760,7 @@ def __init__(self, data, orig_len, pkt, eth_pkt, vlan_pkt, port, valve_vlan, def log(self): vlan_msg = '' if self.vlan: - vlan_msg = 'VLAN %u' % self.vlan.vid + vlan_msg = f'VLAN {self.vlan.vid}' return '%s (L2 type 0x%4.4x, L2 dst %s, L3 src %s, L3 dst %s) %s %s' % ( self.eth_src, self.eth_type, self.eth_dst, self.l3_src, self.l3_dst, self.port, vlan_msg) From 3805b14f7536981f2f34e41c8106c513cc0699d2 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 11:48:56 -0700 Subject: [PATCH 089/231] txt not text --- faucet/valve.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faucet/valve.py b/faucet/valve.py index 9d978a9b08..a2b0b98995 100644 --- a/faucet/valve.py +++ b/faucet/valve.py @@ -1541,7 +1541,7 @@ def oferror(self, msg): # Same scenario as groups. return self._inc_var('of_errors') - self.logger.error(f'OFError type: {error_type} code: {error_code} {error_text}') + self.logger.error(f'OFError type: {error_type} code: {error_code} {error_txt}') def prepare_send_flows(self, flow_msgs): """Prepare to send flows to datapath. 
From a9a6d66f3b881e072d8c4a34ac5a73033fd12f01 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 12:08:43 -0700 Subject: [PATCH 090/231] move to f-strings for valve switch standalone --- faucet/valve_switch_standalone.py | 50 ++++++++++++++----------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/faucet/valve_switch_standalone.py b/faucet/valve_switch_standalone.py index 4584319eb9..bfbecc08a1 100644 --- a/faucet/valve_switch_standalone.py +++ b/faucet/valve_switch_standalone.py @@ -485,20 +485,18 @@ def ban_rules(self, pkt_meta): self.eth_src_table.match(in_port=port.number))) port.dyn_learn_ban_count += 1 self.logger.info( - 'max hosts %u reached on %s, ' - 'temporarily banning learning on this port, ' - 'and not learning %s' % ( - port.max_hosts, port, eth_src)) + f'max hosts {port.max_hosts} reached on {port}, ' \ + f'temporarily banning learning on this port, ' \ + f'and not learning {eth_src}') if vlan is not None and vlan.max_hosts: hosts_count = vlan.hosts_count() if hosts_count == vlan.max_hosts: ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(vlan=vlan))) vlan.dyn_learn_ban_count += 1 self.logger.info( - 'max hosts %u reached on VLAN %u, ' - 'temporarily banning learning on this VLAN, ' - 'and not learning %s on %s' % ( - vlan.max_hosts, vlan.vid, eth_src, port)) + f'max hosts {vlan.max_hosts} reached on VLAN {vlan.vid}, ' \ + f'temporarily banning learning on this VLAN, ' \ + f'and not learning {eth_src} on {port}') return ofmsgs def _temp_ban_host_learning(self, match): @@ -522,8 +520,7 @@ def expire_hosts_from_vlan(self, vlan, now): if expired_hosts: vlan.dyn_last_time_hosts_expired = now self.logger.info( - '%u recently active hosts on VLAN %u, expired %s' % ( - vlan.hosts_count(), vlan.vid, expired_hosts)) + f'{vlan.hosts_count()} recently active hosts on VLAN {vlan.vid}, expired {expired_hosts}') return expired_hosts def _jitter_learn_timeout(self, base_learn_timeout, port, eth_dst): @@ -693,8 
+690,8 @@ def _loop_protect_check(self, entry, vlan, now, eth_src, port, ofmsgs, # pylint if port != cache_port and cache_age < self.cache_update_guard_time: learn_ban = True port.dyn_learn_ban_count += 1 - self.logger.info('rapid move of %s from %s to %s, temp loop ban %s' % ( - eth_src, cache_port, port, port)) + self.logger.info(f'rapid move of {eth_src} from {cache_port} ' \ + f'to {port}, temp loop ban {port}') # already, or newly in protect mode, apply the ban rules. if learn_ban: @@ -776,9 +773,9 @@ def lacp_update_actor_state(self, port, lacp_up, now=None, lacp_pkt=None, cold_s lacp_up, now=now, lacp_pkt=lacp_pkt, cold_start=cold_start) if prev_actor_state != new_actor_state: - self.logger.info('LAG %u %s actor state %s (previous state %s)' % ( - port.lacp, port, port.actor_state_name(new_actor_state), - port.actor_state_name(prev_actor_state))) + self.logger.info(f'LAG {port.lacp} {port} actor state ' \ + f'{port.actor_state_name(new_actor_state)} (previous state ' \ + f'{port.actor_state_name(prev_actor_state)})') return prev_actor_state != new_actor_state def enable_forwarding(self, port): @@ -811,8 +808,8 @@ def lacp_req_reply(self, lacp_pkt, port): for peer_num in port.lacp_passthrough: lacp_peer = self.ports.get(peer_num, None) if not lacp_peer.dyn_lacp_up: - self.logger.warning('Suppressing LACP LAG %s on %s, peer %s link is down' % - (port.lacp, port, lacp_peer)) + self.logger.warning(f'Suppressing LACP LAG {port.lacp} on ' \ + f'{port}, peer {lacp_peer} link is down') return [] actor_state_activity = 0 if port.lacp_active: @@ -842,7 +839,7 @@ def lacp_req_reply(self, lacp_pkt, port): actor_state_activity=actor_state_activity, actor_state_collecting=actor_state_col, actor_state_distributing=actor_state_dist) - self.logger.debug('Sending LACP %s on %s activity %s' % (pkt, port, actor_state_activity)) + self.logger.debug(f'Sending LACP {pkt} on {port} activity {actor_state_activity}') return [valve_of.packetout(port.number, bytes(pkt.data))] 
@staticmethod @@ -876,9 +873,9 @@ def lacp_update_port_selection_state(self, port, valve, other_valves=None, cold_ prev_state = port.lacp_port_state() new_state = port.lacp_port_update(valve.dp.dp_id == nominated_dpid, cold_start=cold_start) if new_state != prev_state: - self.logger.info('LAG %u %s %s (previous state %s)' % ( - port.lacp, port, port.port_role_name(new_state), - port.port_role_name(prev_state))) + self.logger.info(f'LAG {port.lacp} {port} ' \ + f'{port.port_role_name(new_state)} ' \ + f'(previous state {port.port_role_name(prev_state)})') return new_state != prev_state def lacp_handler(self, now, pkt_meta, valve, other_valves, lacp_update): @@ -902,7 +899,7 @@ def lacp_handler(self, now, pkt_meta, valve, other_valves, lacp_update): pkt_meta.reparse_all() lacp_pkt = valve_packet.parse_lacp_pkt(pkt_meta.pkt) if lacp_pkt: - self.logger.debug('receive LACP %s on %s' % (lacp_pkt, pkt_meta.port)) + self.logger.debug(f'receive LACP {lacp_pkt} on {pkt_meta.port}') # Respond to new LACP packet or if we haven't sent anything in a while age = None if pkt_meta.port.dyn_lacp_last_resp_time: @@ -928,9 +925,8 @@ def lacp_handler(self, now, pkt_meta, valve, other_valves, lacp_update): other_actor_system = other_lag_port.dyn_last_lacp_pkt.actor_system if actor_system != other_actor_system: self.logger.error( - 'LACP actor system mismatch %s: %s, %s %s' % ( - pkt_meta.port, actor_system, - other_lag_port, other_actor_system)) + f'LACP actor system mismatch {pkt_meta.port}: ' \ + f'{actor_system}, {other_lag_port} {other_actor_system}') return ofmsgs_by_valve @staticmethod @@ -992,7 +988,7 @@ def _src_rule_expire(self, vlan, port, eth_src): entry = vlan.cached_host_on_port(eth_src, port) if entry is not None: vlan.expire_cache_host(eth_src) - self.logger.info('expired src_rule for host %s' % eth_src) + self.logger.info(f'expired src_rule for host {eth_src}') return ofmsgs def _dst_rule_expire(self, now, vlan, eth_dst): @@ -1005,5 +1001,5 @@ def _dst_rule_expire(self, 
now, vlan, eth_dst): ofmsgs.extend(self.learn_host_on_vlan_ports( now, entry.port, vlan, eth_dst, delete_existing=False)) self.logger.info( - 'refreshing host %s from VLAN %u' % (eth_dst, vlan.vid)) + f'refreshing host {eth_dst} from VLAN {vlan.vid}') return ofmsgs From 2483a97364044082ecf202d0ef9e963c8b9c2e43 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 12:31:50 -0700 Subject: [PATCH 091/231] move to f-strings for valve table --- faucet/valve_table.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/faucet/valve_table.py b/faucet/valve_table.py index db6e792e6b..d2cd7aeb76 100644 --- a/faucet/valve_table.py +++ b/faucet/valve_table.py @@ -50,8 +50,7 @@ def __init__(self, name, table_config, def goto(self, next_table): """Add goto next table instruction.""" assert next_table.name in self.table_config.next_tables, ( - '%s not configured as next table in %s' % ( - next_table.name, self.name)) + f'{next_table.name} not configured as next table in {self.name}') return valve_of.goto_table(next_table) def goto_this(self): @@ -60,8 +59,7 @@ def goto_this(self): def goto_miss(self, next_table): """Add miss goto table instruction.""" assert next_table.name == self.table_config.miss_goto, ( - '%s not configured as miss table in %s' % ( - next_table.name, self.name)) + f'{next_table.name} not configured as miss table in {self.name}') return valve_of.goto_table(next_table) @staticmethod @@ -110,25 +108,25 @@ def _verify_flowmod(self, flowmod): if self.table_id != valve_of.ofp.OFPTT_ALL: for match_type, match_field in match_fields: assert match_type in self.match_types, ( - '%s match in table %s' % (match_type, self.name)) + f'{match_type} match in table {self.name}') else: # TODO: ACL builder should not use ALL table. 
if self.table_id == valve_of.ofp.OFPTT_ALL: return assert not (flowmod.priority == 0 and match_fields), ( - 'default flow cannot have matches on table %s: %s' % (self.name, flowmod)) + f'default flow cannot have matches on table {self.name}: {flowmod}') for match_type, match_field in match_fields: assert match_type in self.match_types, ( - '%s match in table %s' % (match_type, self.name)) + f'{match_type} match in table {self.name}') config_mask = self.match_types[match_type] flow_mask = isinstance(match_field, tuple) assert config_mask or (not config_mask and not flow_mask), ( - '%s configured mask %s but flow mask %s in table %s (%s)' % ( - match_type, config_mask, flow_mask, self.name, flowmod)) + f'{match_type} configured mask {config_mask} but flow mask ' \ + f'{flow_mask} in table {self.name} ({flowmod})') if self.exact_match and match_fields: assert len(self.match_types) == len(match_fields), ( - 'exact match table %s matches %s do not match flow matches %s (%s)' % ( - self.name, self.match_types, match_fields, flowmod)) + f'exact match table {self.name} matches {self.match_types} ' \ + f'do not match flow matches {match_fields} ({flowmod})') def _trim_actions(self, actions): new_actions = [] @@ -143,9 +141,7 @@ def _trim_actions(self, actions): set_fields = {action.key for action in new_actions if valve_of.is_set_field(action)} if self.table_id != valve_of.ofp.OFPTT_ALL and set_fields: assert set_fields.issubset(self.set_fields), ( - 'unexpected set fields %s configured %s in %s' % (set_fields, - self.set_fields, - self.name)) + f'unexpected set fields {set_fields} configured {self.set_fields} in {self.name}') return new_actions @functools.lru_cache() From 73830b2bf5eac7c9539d5a8bf8a99caca22e8910 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 12:36:52 -0700 Subject: [PATCH 092/231] move to f-strings for watcher conf --- faucet/watcher_conf.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/faucet/watcher_conf.py 
b/faucet/watcher_conf.py index 4cf08f7794..3f04d4db87 100644 --- a/faucet/watcher_conf.py +++ b/faucet/watcher_conf.py @@ -182,10 +182,10 @@ def add_db(self, db_conf): test_config_condition( self.file is not None and not (os.path.dirname(self.file) and os.access(os.path.dirname(self.file), os.W_OK)), - '%s is not writable' % self.file) + f'{self.file} is not writable') test_config_condition( self.path is not None and not os.access(self.path, os.W_OK), - '%s is not writable' % self.file) + f'{self.file} is not writable') def add_dp(self, dp): """Add a datapath to this watcher.""" @@ -201,4 +201,4 @@ def check_config(self): valid_types = {'flow_table', 'port_stats', 'port_state', 'meter_stats'} test_config_condition( self.type not in valid_types, - 'type %s not one of %s' % (self.type, valid_types)) + f'type {self.type} not one of {valid_types}') From c9466b687d4dfcc068290ede4b896cc6383e192c Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 12:45:11 -0700 Subject: [PATCH 093/231] move to f-strings for watcher --- faucet/watcher.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/faucet/watcher.py b/faucet/watcher.py index dfb0a97130..d79ab26e66 100644 --- a/faucet/watcher.py +++ b/faucet/watcher.py @@ -80,18 +80,18 @@ def _update(self, rcv_time, msg): rcv_time_str = self._rcv_time(rcv_time) reason = msg.reason port_no = msg.desc.port_no - log_msg = 'port %s unknown state %s' % (port_no, reason) + log_msg = f'port {port_no} unknown state {reason}' if reason == ofp.OFPPR_ADD: - log_msg = 'port %s added' % port_no + log_msg = f'port {port_no} added' elif reason == ofp.OFPPR_DELETE: - log_msg = 'port %s deleted' % port_no + log_msg = f'port {port_no} deleted' elif reason == ofp.OFPPR_MODIFY: link_down = (msg.desc.state & ofp.OFPPS_LINK_DOWN) if link_down: - log_msg = 'port %s down' % port_no + log_msg = f'port {port_no} down' else: - log_msg = 'port %s up' % port_no - log_msg = '%s %s' % (dpid_log(self.dp.dp_id), log_msg) + 
log_msg = f'port {port_no} up' + log_msg = f'{dpid_log(self.dp.dp_id)} {log_msg}' self.logger.info(log_msg) if self.conf.file: with open(self.conf.file, 'a', encoding='utf-8') as logfile: @@ -152,15 +152,14 @@ def _update(self, rcv_time, msg): # Double Hyphen to avoid confusion with ISO8601 times filename = os.path.join( path, - "{}--flowtable--{}.json".format(self.dp.name, rcv_time_str) + f"{self.dp.name}--flowtable--{rcv_time_str}.json" ) if os.path.isfile(filename): # If this filename already exists, add an increment to the filename # (for dealing with parts of a multipart message arriving at the same time) inc = 1 while os.path.isfile(filename): - filename = os.path.join(path, "{}--flowtable--{}--{}.json".format( - self.dp.name, rcv_time_str, inc)) + filename = os.path.join(path, f"{self.dp.name}--flowtable--{rcv_time_str}--{inc}.json") inc += 1 if self.conf.compress: From 3b1ea2ed676ff183090918677a8ad6d90f8cb52d Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 13:02:24 -0700 Subject: [PATCH 094/231] more f-strings for pylint --- clib/mininet_test_topo.py | 2 +- setup.py | 17 ++++++++--------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/clib/mininet_test_topo.py b/clib/mininet_test_topo.py index 28814ccdca..01554b4501 100644 --- a/clib/mininet_test_topo.py +++ b/clib/mininet_test_topo.py @@ -85,7 +85,7 @@ def create_dnsmasq(self, tmpdir, iprange, router, vlan, interface=None): opts += f' --interface={interface}' opts += f' --dhcp-leasefile={dhcp_leasefile}' opts += f' --log-facility={log_facility}' - opts += f' --pid-file=pid_file' + opts += f' --pid-file={pid_file}' opts += ' --conf-file=' return self.cmd(cmd + opts) diff --git a/setup.py b/setup.py index 83a19a7ed1..ddfc68f017 100755 --- a/setup.py +++ b/setup.py @@ -43,19 +43,19 @@ def install_configs(): def setup_ryu_conf(): if not os.path.exists(dst_ryu_conf_dir): - print("Creating %s" % dst_ryu_conf_dir) + print(f"Creating {dst_ryu_conf_dir}") os.makedirs(dst_ryu_conf_dir) 
if not os.path.isfile(dst_ryu_conf): if os.path.exists(old_ryu_conf) and os.path.isfile(old_ryu_conf): - print("Migrating %s to %s" % (old_ryu_conf, dst_ryu_conf)) + print(f"Migrating {old_ryu_conf} to {dst_ryu_conf}") shutil.copy(old_ryu_conf, dst_ryu_conf) else: - print("Copying %s to %s" % (src_ryu_conf, dst_ryu_conf)) + print(f"Copying {src_ryu_conf} to {dst_ryu_conf}") shutil.copy(src_ryu_conf, dst_ryu_conf) def setup_faucet_conf(): if not os.path.exists(dst_faucet_conf_dir): - print("Creating %s" % dst_faucet_conf_dir) + print(f"Creating {dst_faucet_conf_dir}") os.makedirs(dst_faucet_conf_dir) for file_name in os.listdir(src_faucet_conf_dir): src_file = os.path.join(src_faucet_conf_dir, file_name) @@ -64,15 +64,15 @@ def setup_faucet_conf(): if os.path.isfile(dst_file): continue if os.path.isfile(alt_src): - print("Migrating %s to %s" % (alt_src, dst_file)) + print(f"Migrating {alt_src} to {dst_file}") shutil.copy(alt_src, dst_file) elif os.path.isfile(src_file): - print("Copying %s to %s" % (src_file, dst_file)) + print(f"Copying {src_file} to {dst_file}") shutil.copy(src_file, dst_file) def setup_faucet_log(): if not os.path.exists(faucet_log_dir): - print("Creating %s" % faucet_log_dir) + print(f"Creating {faucet_log_dir}") os.makedirs(faucet_log_dir) try: @@ -81,8 +81,7 @@ def setup_faucet_log(): setup_faucet_log() except OSError as exception: if exception.errno == errno.EACCES: - print("Permission denied creating %s, skipping copying configs" - % exception.filename) + print(f"Permission denied creating {exception.filename}, skipping copying configs") else: raise From 688352df2b2307b3f41bf85e642288c3f72f38a5 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 13:08:53 -0700 Subject: [PATCH 095/231] move to f-strings for generative fuzzer conf dict --- tests/generative/fuzzer/config/generate_dict.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/generative/fuzzer/config/generate_dict.py 
b/tests/generative/fuzzer/config/generate_dict.py index 96308a08c5..e5d2db70e9 100644 --- a/tests/generative/fuzzer/config/generate_dict.py +++ b/tests/generative/fuzzer/config/generate_dict.py @@ -41,8 +41,8 @@ def create_config_dict(file_name): or rev_to_write in bogus_values or value in bogus_values): continue - config_file.write('\n"%s"' % to_write) - config_file.write('\n"%s"' % rev_to_write) + config_file.write(f'\n"{to_write}"') + config_file.write(f'\n"{rev_to_write"') # Find CONF objects config file options for conf_obj in [ACL, Meter, Port, Router, DP, VLAN]: for value in conf_obj.defaults: @@ -53,8 +53,8 @@ def create_config_dict(file_name): or rev_to_write in bogus_values or value in bogus_values): continue - config_file.write('\n"%s"' % to_write) - config_file.write('\n"%s"' % rev_to_write) + config_file.write(f'\n"{to_write}"') + config_file.write(f'\n"{rev_to_write}"') def create_examples(self, file_base, file_name): """Generate some initial starting configs by generating them via the config_generator""" @@ -107,7 +107,7 @@ def create_config(network_graph, stack=True): for stack in (True, False): configs.append(create_config((graph), stack=stack)) for config in configs: - ex_fn = os.path.join(file_base, '%s_%s' % (file_name, ex_curr)) + ex_fn = os.path.join(file_base, f'{file_name}_{ex_curr}') with open(ex_fn, 'w+', encoding='utf-8') as ex_file: ex_file.write(config) ex_curr += 1 From d052a328d041375f75be01c31e830931575b086a Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 13:18:49 -0700 Subject: [PATCH 096/231] missing } --- tests/generative/fuzzer/config/generate_dict.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/generative/fuzzer/config/generate_dict.py b/tests/generative/fuzzer/config/generate_dict.py index e5d2db70e9..2eb7abf689 100644 --- a/tests/generative/fuzzer/config/generate_dict.py +++ b/tests/generative/fuzzer/config/generate_dict.py @@ -42,7 +42,7 @@ def create_config_dict(file_name): or value 
in bogus_values): continue config_file.write(f'\n"{to_write}"') - config_file.write(f'\n"{rev_to_write"') + config_file.write(f'\n"{rev_to_write}"') # Find CONF objects config file options for conf_obj in [ACL, Meter, Port, Router, DP, VLAN]: for value in conf_obj.defaults: From 050f95e02dad58ad8cbfaa781ac204257d304558 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 13:30:42 -0700 Subject: [PATCH 097/231] move raw strings to f-strings --- tests/generative/fuzzer/config/generate_dict.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/generative/fuzzer/config/generate_dict.py b/tests/generative/fuzzer/config/generate_dict.py index 2eb7abf689..0742fc092a 100644 --- a/tests/generative/fuzzer/config/generate_dict.py +++ b/tests/generative/fuzzer/config/generate_dict.py @@ -31,12 +31,12 @@ def create_config_dict(file_name): bogus_values = [] for value in config_file.readlines(): # Remove quotes and \n from bogus value to get the true bogus value - bogus_values.append(r'%s' % value[1:2]) + bogus_values.append(fr'{value[1:2]}') # Make sure to add head values into the dictionary for value in V2_TOP_CONFS: for bogus in bogus_values: - to_write = r'%s%s' % (value, bogus) - rev_to_write = r'%s%s' % (bogus, value) + to_write = fr'{value}{bogus}' + rev_to_write = fr'{bogus}{value}' if (to_write in bogus_values or rev_to_write in bogus_values or value in bogus_values): @@ -47,8 +47,8 @@ def create_config_dict(file_name): for conf_obj in [ACL, Meter, Port, Router, DP, VLAN]: for value in conf_obj.defaults: for bogus in bogus_values: - to_write = r'%s%s' % (value, bogus) - rev_to_write = r'%s%s' % (bogus, value) + to_write = fr'{value}{bogus}' + rev_to_write = fr'{bogus}{value}' if (to_write in bogus_values or rev_to_write in bogus_values or value in bogus_values): From a113aceba9f250dd02d4bf5173bba03d12bd725b Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 13:49:42 -0700 Subject: [PATCH 098/231] move to f-strings for 
test fctl --- tests/unit/faucet/test_fctl.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tests/unit/faucet/test_fctl.py b/tests/unit/faucet/test_fctl.py index 0c2b3788da..1ef9ce723d 100755 --- a/tests/unit/faucet/test_fctl.py +++ b/tests/unit/faucet/test_fctl.py @@ -61,7 +61,7 @@ def tearDown(self): def fctl_args(self, extra_args=None): """generate argument list for fctl""" result = copy.copy(self.FCTL_BASE_ARGS) - result += ['--endpoints=file:%s' % self.prom_input_file_name] + result += [f'--endpoints=file:{self.prom_input_file_name}'] if extra_args is not None: result += extra_args return result @@ -96,8 +96,7 @@ def run_fctl(self, prom_input, expected_output, extra_args=None): fctl_cli = ' '.join( ['python3', self.FCTL] + self.fctl_args(extra_args)) retcode, output = subprocess.getstatusoutput(fctl_cli) # pytype: disable=module-attr - self.assertEqual(0, retcode, msg='%s returned %d' % ( - fctl_cli, retcode)) + self.assertEqual(0, retcode, msg=f'{fctl_cli} returned {retcode}') output = output.strip() self.assertEqual(output, expected_output) @@ -145,7 +144,7 @@ def test_bad_content(self): self.assertEqual( None, fctl.scrape_prometheus( - ['file://%s' % bad_input_file_name], err_output_file=err_output_file)) + [f'file://{bad_input_file_name}'], err_output_file=err_output_file)) def write_prom_input_file(self, input_data): with open(self.prom_input_file_name, 'w', encoding='utf-8') as prom_input_file: From 589645cb307554bf2a55ed5e1e8e1f8ec29a566f Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 13:59:32 -0700 Subject: [PATCH 099/231] move to f-strings for test valve dot1x --- tests/unit/faucet/test_valve_dot1x.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/tests/unit/faucet/test_valve_dot1x.py b/tests/unit/faucet/test_valve_dot1x.py index 564e0cab49..47454c1277 100755 --- a/tests/unit/faucet/test_valve_dot1x.py +++ b/tests/unit/faucet/test_valve_dot1x.py @@ -27,10 +27,10 
@@ class ValveDot1xSmokeTestCase(ValveTestBases.ValveTestNetwork): """Smoke test to check dot1x can be initialized.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DOT1X_CONFIG} interfaces: p1: number: 1 @@ -46,7 +46,7 @@ class ValveDot1xSmokeTestCase(ValveTestBases.ValveTestNetwork): vid: 0x200 dot1x_assigned: True -""" % DOT1X_CONFIG +""" def setUp(self): """Setup basic 802.1x config""" @@ -84,11 +84,11 @@ class ValveDot1xACLSmokeTestCase(ValveDot1xSmokeTestCase): allow: 0 """ - CONFIG = """ -{} + CONFIG = f""" +{ACL_CONFIG} dps: s1: -{} +{DOT1X_ACL_CONFIG} interfaces: p1: number: 1 @@ -104,16 +104,16 @@ class ValveDot1xACLSmokeTestCase(ValveDot1xSmokeTestCase): student: vid: 0x200 dot1x_assigned: True -""".format(ACL_CONFIG, DOT1X_ACL_CONFIG) +""" class ValveDot1xMABSmokeTestCase(ValveDot1xSmokeTestCase): """Smoke test to check dot1x can be initialized with dot1x MAB.""" - CONFIG = """ + CONFIG = f""" dps: s1: -{} +{DOT1X_CONFIG} interfaces: p1: number: 1 @@ -126,12 +126,12 @@ class ValveDot1xMABSmokeTestCase(ValveDot1xSmokeTestCase): vlans: v100: vid: 0x100 -""".format(DOT1X_CONFIG) +""" class ValveDot1xDynACLSmokeTestCase(ValveDot1xSmokeTestCase): """Smoke test to check dot1x can be initialized with dynamic dot1x ACLs.""" - CONFIG = """ + CONFIG = f""" acls: accept_acl: dot1x_assigned: True @@ -147,7 +147,7 @@ class ValveDot1xDynACLSmokeTestCase(ValveDot1xSmokeTestCase): allow: True dps: s1: -%s +{DOT1X_CONFIG} interfaces: p1: number: 1 @@ -161,7 +161,7 @@ class ValveDot1xDynACLSmokeTestCase(ValveDot1xSmokeTestCase): vlans: v100: vid: 0x100 -""" % DOT1X_CONFIG +""" def setUp(self): self.setup_valves(self.CONFIG) From a0788e0a6a43db6b379db541f9cb0840b523e799 Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 14:20:42 -0700 Subject: [PATCH 100/231] move to f-strings for test valve config --- tests/unit/faucet/test_valve_config.py | 172 ++++++++++++------------- 1 file changed, 86 insertions(+), 86 deletions(-) diff --git 
a/tests/unit/faucet/test_valve_config.py b/tests/unit/faucet/test_valve_config.py index e7fb515ed5..5c640413c2 100755 --- a/tests/unit/faucet/test_valve_config.py +++ b/tests/unit/faucet/test_valve_config.py @@ -38,16 +38,16 @@ class ValveIncludeTestCase(ValveTestBases.ValveTestNetwork): """Test include optional files.""" - CONFIG = """ + CONFIG = f""" include-optional: ['/does/not/exist/'] dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 native_vlan: 0x100 -""" % DP1_CONFIG +""" def setUp(self): """Setup config with non-existent optional include file""" @@ -61,20 +61,20 @@ def test_include_optional(self): class ValveBadConfTestCase(ValveTestBases.ValveTestNetwork): """Test recovery from a bad config file.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 native_vlan: 0x100 -""" % DP1_CONFIG +""" - MORE_CONFIG = """ + MORE_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -82,7 +82,7 @@ class ValveBadConfTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x100 -""" % DP1_CONFIG +""" BAD_CONFIG = """ dps: {} @@ -107,16 +107,16 @@ def test_bad_conf(self): self.assertEqual( load_error, self.get_prom('faucet_config_load_error', bare=True), - msg='%u: %s' % (load_error, config)) + msg=f'{load_error}: {config}') class ValveChangePortTestCase(ValveTestBases.ValveTestNetwork): """Test changes to config on ports.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -125,12 +125,12 @@ class ValveChangePortTestCase(ValveTestBases.ValveTestNetwork): number: 2 native_vlan: 0x200 permanent_learn: True -""" % DP1_CONFIG +""" - LESS_CONFIG = """ + LESS_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -139,7 +139,7 @@ class ValveChangePortTestCase(ValveTestBases.ValveTestNetwork): number: 2 native_vlan: 0x200 permanent_learn: False -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -163,10 +163,10 @@ def 
test_delete_permanent_learn(self): class ValveDeletePortTestCase(ValveTestBases.ValveTestNetwork): """Test deletion of a port.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -177,12 +177,12 @@ class ValveDeletePortTestCase(ValveTestBases.ValveTestNetwork): p3: number: 3 tagged_vlans: [0x100] -""" % DP1_CONFIG +""" - LESS_CONFIG = """ + LESS_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -190,7 +190,7 @@ class ValveDeletePortTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 tagged_vlans: [0x100] -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -204,10 +204,10 @@ def test_port_delete(self): class ValveAddPortMirrorNoDelVLANTestCase(ValveTestBases.ValveTestNetwork): """Test addition of port mirroring does not cause a del VLAN.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -218,12 +218,12 @@ class ValveAddPortMirrorNoDelVLANTestCase(ValveTestBases.ValveTestNetwork): p3: number: 3 output_only: true -""" % DP1_CONFIG +""" - MORE_CONFIG = """ + MORE_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -235,7 +235,7 @@ class ValveAddPortMirrorNoDelVLANTestCase(ValveTestBases.ValveTestNetwork): number: 3 output_only: true mirror: [1] -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -249,10 +249,10 @@ def test_port_mirror(self): class ValveAddPortTestCase(ValveTestBases.ValveTestNetwork): """Test addition of a port.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -260,12 +260,12 @@ class ValveAddPortTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 tagged_vlans: [0x100] -""" % DP1_CONFIG +""" - MORE_CONFIG = """ + MORE_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -276,7 +276,7 @@ class ValveAddPortTestCase(ValveTestBases.ValveTestNetwork): p3: number: 3 tagged_vlans: [0x100] -""" % DP1_CONFIG +""" 
@staticmethod def _inport_flows(in_port, ofmsgs): @@ -393,10 +393,10 @@ def test_port_add_traffic(self): class ValveWarmStartVLANTestCase(ValveTestBases.ValveTestNetwork): """Test change of port VLAN only is a warm start.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 9 @@ -410,12 +410,12 @@ class ValveWarmStartVLANTestCase(ValveTestBases.ValveTestNetwork): p4: number: 14 native_vlan: 0x200 -""" % DP1_CONFIG +""" - WARM_CONFIG = """ + WARM_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 9 @@ -429,7 +429,7 @@ class ValveWarmStartVLANTestCase(ValveTestBases.ValveTestNetwork): p4: number: 14 native_vlan: 0x300 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -461,10 +461,10 @@ def verify_func(): class ValveDeleteVLANTestCase(ValveTestBases.ValveTestNetwork): """Test deleting VLAN.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -472,12 +472,12 @@ class ValveDeleteVLANTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x200 -""" % DP1_CONFIG +""" - LESS_CONFIG = """ + LESS_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -485,7 +485,7 @@ class ValveDeleteVLANTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x200 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -499,10 +499,10 @@ def test_delete_vlan(self): class ValveChangeDPTestCase(ValveTestBases.ValveTestNetwork): """Test changing DP.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} priority_offset: 4321 interfaces: p1: @@ -511,12 +511,12 @@ class ValveChangeDPTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x100 -""" % DP1_CONFIG +""" - NEW_CONFIG = """ + NEW_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} priority_offset: 1234 interfaces: p1: @@ -525,7 +525,7 @@ class ValveChangeDPTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x100 -""" % 
DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config with priority offset""" @@ -539,10 +539,10 @@ def test_change_dp(self): class ValveAddVLANTestCase(ValveTestBases.ValveTestNetwork): """Test adding VLAN.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -550,12 +550,12 @@ class ValveAddVLANTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 tagged_vlans: [0x100] -""" % DP1_CONFIG +""" - MORE_CONFIG = """ + MORE_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -563,7 +563,7 @@ class ValveAddVLANTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 tagged_vlans: [0x100, 0x300] -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -577,7 +577,7 @@ def test_add_vlan(self): class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): """Test changes to ACL on a port.""" - CONFIG = """ + CONFIG = f""" acls: acl_same_a: - rule: @@ -593,7 +593,7 @@ class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): allow: 0 dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -602,9 +602,9 @@ class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x200 -""" % DP1_CONFIG +""" - SAME_CONTENT_CONFIG = """ + SAME_CONTENT_CONFIG = f""" acls: acl_same_a: - rule: @@ -620,7 +620,7 @@ class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): allow: 0 dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -629,9 +629,9 @@ class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x200 -""" % DP1_CONFIG +""" - DIFF_CONTENT_CONFIG = """ + DIFF_CONTENT_CONFIG = f""" acls: acl_same_a: - rule: @@ -647,7 +647,7 @@ class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): allow: 0 dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -656,7 +656,7 @@ class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): p2: number: 2 native_vlan: 0x200 -""" % DP1_CONFIG +""" def setUp(self): """Setup 
basic ACL config""" @@ -690,10 +690,10 @@ class ValveChangeMirrorTestCase(ValveTestBases.ValveTestNetwork): """Test changes mirroring port.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -704,12 +704,12 @@ class ValveChangeMirrorTestCase(ValveTestBases.ValveTestNetwork): p3: number: 3 native_vlan: 0x200 -""" % DP1_CONFIG +""" - MIRROR_CONFIG = """ + MIRROR_CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -720,7 +720,7 @@ class ValveChangeMirrorTestCase(ValveTestBases.ValveTestNetwork): p3: number: 3 native_vlan: 0x200 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -763,10 +763,10 @@ def setUp(self): def test_vlan_acl_deny(self): """Test VLAN ACL denies a packet.""" - acl_config = """ + acl_config = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -803,7 +803,7 @@ def test_vlan_acl_deny(self): dl_type: 0x800 actions: allow: 0 -""" % DP1_CONFIG +""" drop_match = { 'in_port': 2, @@ -976,15 +976,15 @@ def verify_func(): class ValveReloadConfigProfile(ValveTestBases.ValveTestNetwork): """Test reload processing time.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{BASE_DP1_CONFIG} interfaces: p1: number: 1 native_vlan: 0x100 -""" % BASE_DP1_CONFIG +""" NUM_PORTS = 100 baseline_total_tt = None @@ -1028,16 +1028,16 @@ def load_orig_config(): return time.sleep(i) - self.fail('%f: %s' % (total_tt_prop, pstats_text)) + self.fail(f'{total_tt_prop}: {pstats_text}') class ValveTestVLANRef(ValveTestBases.ValveTestNetwork): """Test reference to same VLAN by name or VID.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -1048,7 +1048,7 @@ class ValveTestVLANRef(ValveTestBases.ValveTestNetwork): vlans: threes: vid: 333 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -1065,15 +1065,15 @@ def test_vlan_refs(self): class ValveTestConfigHash(ValveTestBases.ValveTestNetwork): """Verify 
faucet_config_hash_info update after config change""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 native_vlan: 0x100 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" From a6bbab7e1afb92c166e3bb2b9077bb90ec84d8aa Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 20 Sep 2021 14:42:28 -0700 Subject: [PATCH 101/231] move to f-strings for test valve --- tests/unit/faucet/test_valve.py | 60 ++++++++++++++++----------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/tests/unit/faucet/test_valve.py b/tests/unit/faucet/test_valve.py index 2bc82a993d..1fcd44af63 100644 --- a/tests/unit/faucet/test_valve.py +++ b/tests/unit/faucet/test_valve.py @@ -47,15 +47,15 @@ class ValveTestCase(ValveTestBases.ValveTestBig): # pylint: disable=too-few-pub class ValveFuzzTestCase(ValveTestBases.ValveTestNetwork): """Test unknown ports/VLANs.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 native_vlan: 0x100 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -153,10 +153,10 @@ def test_output(self): class ValveRestBcastTestCase(ValveTestBases.ValveTestNetwork): """Test restricted broadcast.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -169,7 +169,7 @@ class ValveRestBcastTestCase(ValveTestBases.ValveTestNetwork): number: 3 native_vlan: 0x100 restricted_bcast_arpnd: true -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config with restricted broadcast enabled""" @@ -271,10 +271,10 @@ def test_oferror_parser(self): class ValveGroupTestCase(ValveTestBases.ValveTestNetwork): """Tests for datapath with group support.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{GROUP_DP1_CONFIG} interfaces: p1: number: 1 @@ -294,7 +294,7 @@ class ValveGroupTestCase(ValveTestBases.ValveTestNetwork): vid: 0x100 v200: vid: 0x200 -""" % GROUP_DP1_CONFIG +""" def setUp(self): """Setup basic 
port and vlan config""" @@ -335,10 +335,10 @@ def test_unknown_eth_dst_rule(self): class ValveIdleLearnTestCase(ValveTestBases.ValveTestNetwork): """Smoke test for idle-flow based learning. This feature is not currently reliable.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{IDLE_DP1_CONFIG} interfaces: p1: number: 1 @@ -362,7 +362,7 @@ class ValveIdleLearnTestCase(ValveTestBases.ValveTestNetwork): vid: 0x100 v200: vid: 0x200 -""" % IDLE_DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config with mirroring""" @@ -411,10 +411,10 @@ def test_host_learn_coldstart(self): class ValveLACPTestCase(ValveTestBases.ValveTestNetwork): """Test LACP.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} lacp_timeout: 5 interfaces: p1: @@ -441,7 +441,7 @@ class ValveLACPTestCase(ValveTestBases.ValveTestNetwork): vid: 0x200 v300: vid: 0x300 -""" % DP1_CONFIG +""" def setUp(self): """Setup lacp config and activate ports""" @@ -553,10 +553,10 @@ def test_dp_disconnect(self): class ValveTFMSizeOverride(ValveTestBases.ValveTestNetwork): """Test TFM size override.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} table_sizes: eth_src: 999 interfaces: @@ -566,7 +566,7 @@ class ValveTFMSizeOverride(ValveTestBases.ValveTestNetwork): vlans: v100: vid: 0x100 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config with overriden TFM sizing""" @@ -586,10 +586,10 @@ class ValveTFMSize(ValveTestBases.ValveTestNetwork): NUM_PORTS = 128 - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} lacp_timeout: 5 interfaces: p1: @@ -620,7 +620,7 @@ class ValveTFMSize(ValveTestBases.ValveTestNetwork): vid: 0x200 v300: vid: 0x300 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic port and vlan config""" @@ -638,10 +638,10 @@ def test_size(self): class ValveActiveLACPTestCase(ValveTestBases.ValveTestNetwork): """Test LACP.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} lacp_timeout: 5 interfaces: p1: @@ -669,7 +669,7 @@ class 
ValveActiveLACPTestCase(ValveTestBases.ValveTestNetwork): vid: 0x200 v300: vid: 0x300 -""" % DP1_CONFIG +""" def setUp(self): """Setup basic lacp config and activate ports""" @@ -805,7 +805,7 @@ class ValveMirrorTestCase(ValveTestBases.ValveTestBig): """Test ACL and interface mirroring.""" # TODO: check mirror packets are present/correct - CONFIG = """ + CONFIG = f""" acls: mirror_ospf: - rule: @@ -823,7 +823,7 @@ class ValveMirrorTestCase(ValveTestBases.ValveTestBig): allow: 1 dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -879,7 +879,7 @@ class ValveMirrorTestCase(ValveTestBases.ValveTestBig): server_addresses: ['127.0.0.1'] neighbor_addresses: ['127.0.0.1'] vlan: v100 -""" % DP1_CONFIG +""" def setUp(self): """Setup complex config with routing, bgp, mirroring and ACLs""" @@ -894,10 +894,10 @@ def test_unmirror(self): class ValvePortDescTestCase(ValveTestBases.ValveTestNetwork): """Test OFPMP_PORT_DESC reply handling.""" - CONFIG = """ + CONFIG = f""" dps: s1: -%s +{DP1_CONFIG} interfaces: p1: number: 1 @@ -910,7 +910,7 @@ class ValvePortDescTestCase(ValveTestBases.ValveTestNetwork): vid: 0x100 v200: vid: 0x200 -""" % DP1_CONFIG +""" def setUp(self): """Setup simple configuration with no ports up""" From dcb7cf5aafb1331d33d6a3a2ad280e80369a2576 Mon Sep 17 00:00:00 2001 From: cglewis Date: Tue, 21 Sep 2021 12:27:52 -0700 Subject: [PATCH 102/231] move to f-strings for mininet tests --- tests/integration/mininet_tests.py | 469 ++++++++++++++--------------- 1 file changed, 228 insertions(+), 241 deletions(-) diff --git a/tests/integration/mininet_tests.py b/tests/integration/mininet_tests.py index dcf8a9ec6b..5bd20b418d 100644 --- a/tests/integration/mininet_tests.py +++ b/tests/integration/mininet_tests.py @@ -260,7 +260,7 @@ class Faucet8021XBase(FaucetTest): def _priv_mac(host_id): two_byte_port_num = '%04x' % host_id two_byte_port_num_formatted = ':'.join((two_byte_port_num[:2], two_byte_port_num[2:])) - return '00:00:00:00:%s' % 
two_byte_port_num_formatted + return f'00:00:00:00:{two_byte_port_num_formatted}' def pre_start_net(self): self.eapol1_host, self.eapol2_host, self.ping_host, self.nfv_host = self.hosts_name_ordered() @@ -313,7 +313,7 @@ def setUp(self): def tearDown(self, ignore_oferrors=False): for pid in self.nfv_pids: - self.nfv_host.cmd('kill %u' % pid) + self.nfv_host.cmd(f'kill {pid}') super().tearDown(ignore_oferrors=ignore_oferrors) def post_test_checks(self): @@ -358,8 +358,7 @@ def insert_dynamic_values(dot1x_expected_events): for expected_event in dot1x_expected_events: self.assertTrue(expected_event in events_that_happened, - msg='expected event: {} not in events_that_happened {}'.format( - expected_event, events_that_happened)) + msg=f'expected event: {expected_event} not in events_that_happened {events_that_happened}') @staticmethod def _eapol_filter(fields): @@ -369,7 +368,7 @@ def _success_eapol_filter(self, expect_success): eap_code = '0x04' if expect_success: eap_code = '0x03' - return self._eapol_filter(('ether[14:4] == 0x01000004', 'ether[18] == %s' % eap_code)) + return self._eapol_filter(('ether[14:4] == 0x01000004', f'ether[18] == {eap_code}')) def _logoff_eapol_filter(self): return self._eapol_filter(('ether[14:4] == 0x01020000',)) @@ -449,9 +448,9 @@ def retry_8021x(self, host, port_num, conf, and_logoff=False, retries=2, expect_ def wait_8021x_flows(self, port_no): port_actions = [ - 'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), 'OUTPUT:%u' % self.nfv_portno] + 'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), f'OUTPUT:{self.nfv_portno}'] from_nfv_actions = [ - 'SET_FIELD: {eth_src:01:80:c2:00:00:03}', 'OUTPUT:%d' % port_no] + 'SET_FIELD: {eth_src:01:80:c2:00:00:03}', f'OUTPUT:{port_no}'] from_nfv_match = { 'in_port': self.nfv_portno, 'dl_src': self._priv_mac(port_no), 'dl_type': 0x888e} self.wait_until_matching_flow(None, table_id=0, actions=port_actions) @@ -477,7 +476,7 @@ def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, 
timeout=10, wpa_ctrl_path = self.get_wpa_ctrl_path(host) if os.path.exists(wpa_ctrl_path): self.terminate_wpasupplicant(host) - for pid in host.cmd('lsof -t %s' % wpa_ctrl_path).splitlines(): + for pid in host.cmd(f'lsof -t {wpa_ctrl_path}').splitlines(): try: os.kill(int(pid), 15) except (ValueError, ProcessLookupError): @@ -496,7 +495,7 @@ def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, {'eth_src': host.MAC(), 'in_port': port_num}, table_id=0) self.one_ipv4_ping( host, self.ping_host.IP(), require_host_learned=False) - host.cmd('wpa_cli -p %s logoff' % wpa_ctrl_path) + host.cmd(f'wpa_cli -p {wpa_ctrl_path} logoff') self.wait_until_no_matching_flow( {'eth_src': host.MAC(), 'in_port': port_num}, table_id=0) self.one_ipv4_ping( @@ -508,16 +507,16 @@ def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, def terminate_wpasupplicant(self, host): wpa_ctrl_path = self.get_wpa_ctrl_path(host) - host.cmd('wpa_cli -p %s terminate' % wpa_ctrl_path) + host.cmd(f'wpa_cli -p {wpa_ctrl_path} terminate') def get_wpa_ctrl_path(self, host): wpa_ctrl_path = os.path.join( - self.tmpdir, '%s/%s-wpasupplicant' % (self.tmpdir, host.name)) + self.tmpdir, f'{self.tmpdir}/{host.name}-wpasupplicant') return wpa_ctrl_path @staticmethod def get_wpa_status(host, wpa_ctrl_path): - status = host.cmdPrint('wpa_cli -p %s status' % wpa_ctrl_path) + status = host.cmdPrint(f'wpa_cli -p {wpa_ctrl_path} status') for line in status.splitlines(): if line.startswith('EAP state'): return line.split('=')[1].strip() @@ -529,14 +528,14 @@ def wait_for_eap_success(self, host, wpa_ctrl_path, timeout=5): if eap_state == 'SUCCESS': return time.sleep(1) - self.fail('did not get EAP success: %s' % eap_state) + self.fail(f'did not get EAP success: {eap_state}') def wait_for_radius(self, radius_log_path): self.wait_until_matching_lines_from_file( r'.*Ready to process requests', radius_log_path) def start_freeradius(self): - radius_log_path = '%s/radius.log' % 
self.tmpdir + radius_log_path = f'{self.tmpdir}/radius.log' listen_match = r'(listen {[^}]*(limit {[^}]*})[^}]*})|(listen {[^}]*})' listen_config = """listen { @@ -552,10 +551,10 @@ def start_freeradius(self): if os.path.isfile('/etc/freeradius/users'): # Assume we are dealing with freeradius 2 configuration - shutil.copytree('/etc/freeradius/', '%s/freeradius' % self.tmpdir) - users_path = '%s/freeradius/users' % self.tmpdir + shutil.copytree('/etc/freeradius/', f'{self.tmpdir}/freeradius') + users_path = f'{self.tmpdir}/freeradius/users' - with open('%s/freeradius/radiusd.conf' % self.tmpdir, 'r+', encoding='utf-8') as default_site: + with open(f'{self.tmpdir}/freeradius/radiusd.conf', 'r+', encoding='utf-8') as default_site: default_config = default_site.read() default_config = re.sub(listen_match, '', default_config) default_site.seek(0) @@ -567,11 +566,11 @@ def start_freeradius(self): freerad_version = os.popen( r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0.9])"').read().rstrip() freerad_major_version = freerad_version.split(' ')[1] - shutil.copytree('/etc/freeradius/%s/' % freerad_major_version, - '%s/freeradius' % self.tmpdir) - users_path = '%s/freeradius/mods-config/files/authorize' % self.tmpdir + shutil.copytree(f'/etc/freeradius/{freerad_major_version}/', + f'{self.tmpdir}/freeradius') + users_path = f'{self.tmpdir}/freeradius/mods-config/files/authorize' - with open('%s/freeradius/sites-enabled/default' % self.tmpdir, 'r+', encoding='utf-8') as default_site: + with open(f'{self.tmpdir}/freeradius/sites-enabled/default', 'r+', encoding='utf-8') as default_site: default_config = default_site.read() default_config = re.sub( listen_match, '', default_config) @@ -584,13 +583,13 @@ def start_freeradius(self): with open(users_path, 'w', encoding='utf-8') as users_file: users_file.write(self.freeradius_user_conf.format(self.SESSION_TIMEOUT)) - with open('%s/freeradius/clients.conf' % self.tmpdir, 'w', encoding='utf-8') as clients: + with 
open(f'{self.tmpdir}/freeradius/clients.conf', 'w', encoding='utf-8') as clients: clients.write("""client localhost { ipaddr = 127.0.0.1 secret = SECRET }""") - with open('%s/freeradius/sites-enabled/inner-tunnel' % self.tmpdir, 'r+', encoding='utf-8') as innertunnel_site: + with open(f'{self.tmpdir}/freeradius/sites-enabled/inner-tunnel', 'r+', encoding='utf-8') as innertunnel_site: tunnel_config = innertunnel_site.read() listen_config = """listen { ipaddr = 127.0.0.1 @@ -602,8 +601,8 @@ def start_freeradius(self): innertunnel_site.write(tunnel_config) innertunnel_site.truncate() - os.system('chmod o+rx %s' % self.root_tmpdir) - os.system('chown -R root:freerad %s/freeradius/' % self.tmpdir) + os.system(f'chmod o+rx {self.root_tmpdir}') + os.system(f'chown -R root:freerad {self.tmpdir}/freeradius/') self.nfv_host.cmd( mininet_test_util.timeout_cmd( @@ -769,16 +768,16 @@ def port_up(port): username_bytes = ''.join(('%2x' % ord(c) for c in username)) tcpdump_filter = ' or '.join(( self._success_eapol_filter(True), - self._eapol_filter(('ether[23:4] == 0x%s' % username_bytes,)))) + self._eapol_filter((f'ether[23:4] == 0x{username_bytes}',)))) tcpdump_txt = self.tcpdump_helper( self.eapol1_host, tcpdump_filter, [ lambda: port_up(port_no1)], timeout=30, vflags='-vvv', packets=2) for req_str in ( - 'Identity: %s' % username, # supplicant replies with username + f'Identity: {username}', # supplicant replies with username 'Success', # supplicant success ): - self.assertTrue(req_str in tcpdump_txt, msg='%s not in %s' % (req_str, tcpdump_txt)) + self.assertTrue(req_str in tcpdump_txt, msg=f'{req_str} not in {tcpdump_txt}') self.one_ipv4_ping( self.eapol1_host, self.ping_host.IP(), @@ -1195,15 +1194,15 @@ def test_untagged(self): self.wait_until_matching_flow( {'vlan_vid': radius_vid1}, table_id=self._FLOOD_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3]) + actions=[f'POP_VLAN', f'OUTPUT:{port_no1}', f'OUTPUT:{port_no3}']) 
self.wait_until_matching_flow( {'vlan_vid': vid}, table_id=self._FLOOD_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no2]) + actions=['POP_VLAN', f'OUTPUT:{port_no2}']) self.wait_until_no_matching_flow( {'vlan_vid': radius_vid2}, table_id=self._FLOOD_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2]) + actions=['POP_VLAN', f'OUTPUT:{port_no1}', f'OUTPUT:{port_no2}']) self.one_ipv4_ping( self.eapol1_host, self.ping_host.IP(), @@ -1228,11 +1227,11 @@ def test_untagged(self): self.wait_until_no_matching_flow( {'vlan_vid': radius_vid1}, table_id=self._FLOOD_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3]) + actions=['POP_VLAN', f'OUTPUT:{port_no1}', f'OUTPUT:{port_no3}']) self.wait_until_matching_flow( {'vlan_vid': vid}, table_id=self._FLOOD_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2]) + actions=['POP_VLAN', f'OUTPUT:{port_no1}', f'OUTPUT:{port_no2}']) # check two 1x hosts play nicely. (same dyn vlan) self.assertTrue(self.try_8021x( @@ -1301,7 +1300,7 @@ def test_untagged(self): self.wait_until_matching_flow( {'vlan_vid': vid}, table_id=self._FLOOD_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no2]) + actions=['POP_VLAN', f'OUTPUT:{port_no2}']) self.wait_until_no_matching_flow( {'in_port': port_no2}, table_id=self._VLAN_TABLE, @@ -1309,14 +1308,14 @@ def test_untagged(self): self.wait_until_no_matching_flow( {'vlan_vid': radius_vid2}, table_id=self._FLOOD_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2]) + actions=['POP_VLAN', f'OUTPUT:{port_no1}', f'OUTPUT:{port_no2}']) self.wait_until_no_matching_flow( {'eth_src': self.eapol2_host.MAC()}, table_id=self._ETH_SRC_TABLE) self.wait_until_no_matching_flow( {'eth_dst': self.eapol2_host.MAC(), 'vlan_vid': radius_vid1}, table_id=self._ETH_DST_TABLE, - actions=['POP_VLAN', 'OUTPUT:%s' % port_no2]) + actions=['POP_VLAN', f'OUTPUT:{port_no2}']) self.post_test_checks() @@ -1374,7 +1373,7 @@ def
test_untagged(self): super().test_untagged() # Confirm controller can see switch interface with traffic. - ifconfig_output = self.net.controllers[0].cmd('ifconfig %s' % last_host_switch_intf) + ifconfig_output = self.net.controllers[0].cmd(f'ifconfig {last_host_switch_intf}') self.assertTrue( re.search('(R|T)X packets[: ][1-9]', ifconfig_output), msg=ifconfig_output) @@ -1487,7 +1486,7 @@ def test_untagged(self): timeout = 5 * 3 tcpdump_txt = self.tcpdump_helper( first_host, tcpdump_filter, [ - lambda: first_host.cmd('sleep %u' % timeout)], + lambda: first_host.cmd(f'sleep {timeout}')], timeout=timeout, vflags='-vv', packets=1) oui_prefix = ''.join(self.FAUCET_MAC.split(':')[:3]) faucet_lldp_dp_id_attr = '%2.2x' % 1 @@ -1538,7 +1537,7 @@ def test_untagged(self): timeout = interval * 3 tcpdump_txt = self.tcpdump_helper( first_host, tcpdump_filter, [ - lambda: first_host.cmd('sleep %u' % timeout)], + lambda: first_host.cmd(f'sleep {timeout}')], # output epoch secs timeout=timeout, vflags='-tt', packets=2) timestamps = re.findall(r'(\d+)\.\d+ [0-9a-f:]+ \> [0-9a-f:]+', tcpdump_txt) @@ -1567,7 +1566,7 @@ def test_untagged(self): timeout = 5 * 3 tcpdump_txt = self.tcpdump_helper( first_host, tcpdump_filter, [ - lambda: first_host.cmd('sleep %u' % timeout)], + lambda: first_host.cmd(f'sleep {timeout}')], timeout=timeout, vflags='-vv', packets=1) for lldp_required in ( r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC, @@ -1576,7 +1575,7 @@ def test_untagged(self): r'Port Description TLV \(4\), length [1-9]: b%u' % self.port_map['port_1']): self.assertTrue( re.search(lldp_required, tcpdump_txt), - msg='%s: %s' % (lldp_required, tcpdump_txt)) + msg=f'{lldp_required}: {tcpdump_txt}') class FaucetUntaggedMeterParseTest(FaucetUntaggedTest): @@ -1804,12 +1803,11 @@ def test_untagged(self): netns = self.hostns(first_host) setup_cmds = [] setup_cmds.extend( - ['ip link set %s netns %s' % (macvlan2_intf, netns)]) + [f'ip link set {macvlan2_intf} netns {netns}']) for 
exec_cmd in ( - ('ip address add %s/24 brd + dev %s' % ( - macvlan2_ipv4, macvlan2_intf), - 'ip link set %s up' % macvlan2_intf)): - setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd)) + (f'ip address add {macvlan2_ipv4}/24 brd + dev {macvlan2_intf}', + f'ip link set {macvlan2_intf} up')): + setup_cmds.append(f'ip netns exec {netns} {exec_cmd}') self.quiet_commands(first_host, setup_cmds) self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_ipv4) self.one_ipv4_ping(first_host, second_host.IP()) @@ -1885,7 +1883,7 @@ def test_scapy_fuzz(self): try: scapy.all.send(scapy.all.fuzz(scapy.all.Ether())) # pylint: disable=no-member except Exception as e: # pylint: disable=broad-except - error('%s:' % self._test_name(), e) + error(f'{self._test_name()}:', e) exception = True self.assertFalse(exception, 'Scapy threw an exception in send(fuzz())') @@ -1901,41 +1899,36 @@ def verify_dp_port_healthy(self, dp_port, retries=5, min_mbps=MIN_MBPS): port_state = port_desc['state'] port_config = port_desc['config'] port_speed_mbps = (port_desc['curr_speed'] * 1e3) / 1e6 - error('DP %u is %s, at %u mbps\n' % (dp_port, port_name, port_speed_mbps)) + error(f'DP {dp_port} is {port_name}, at {port_speed_mbps} mbps\n') if port_speed_mbps < min_mbps: - error('port speed %u below minimum %u mbps\n' % ( - port_speed_mbps, min_mbps)) + error(f'port speed {port_speed_mbps} below minimum {min_mbps} mbps\n') elif port_config != 0: - error('port config %u must be 0 (all clear)' % port_config) + error(f'port config {port_config} must be 0 (all clear)') elif port_state not in (0, 4): - error('state %u must be 0 (all flags clear or live)\n' % ( - port_state)) + error(f'state {port_state} must be 0 (all flags clear or live)\n') else: return time.sleep(1) - self.fail('DP port %u not healthy (%s)' % (dp_port, port_desc)) + self.fail(f'DP port {dp_port} not healthy ({port_desc})') def test_portmap(self): prom_desc = self.scrape_prometheus(var='of_dp_desc_stats') 
self.assertIsNotNone(prom_desc, msg='Cannot scrape of_dp_desc_stats') - error('DP: %s\n' % prom_desc[0]) - error('port_map: %s\n' % self.port_map) + error(f'DP: {prom_desc[0]}\n') + error(f'port_map: {self.port_map}\n') for i, host in enumerate(self.hosts_name_ordered(), start=1): - in_port = 'port_%u' % i + in_port = f'port_{i}' dp_port = self.port_map[in_port] if dp_port in self.switch_map: - error('verifying cabling for %s: host %s -> dp %u\n' % ( - in_port, self.switch_map[dp_port], dp_port)) + error(f'verifying cabling for {in_port}: host {self.switch_map[dp_port]} -> dp {dp_port}\n') else: - error('verifying host %s -> dp %s\n' % ( - in_port, dp_port)) + error(f'verifying host {in_port} -> dp {dp_port}\n') self.verify_dp_port_healthy(dp_port) self.require_host_learned(host, in_port=dp_port) learned = self.prom_macs_learned() self.assertEqual( len(self.hosts_name_ordered()), len(learned), - msg='test requires exactly %u hosts learned (got %s)' % ( - len(self.hosts_name_ordered()), learned)) + msg=f'test requires exactly {len(self.hosts_name_ordered())} hosts learned (got {learned})') def test_listening(self): msg_template = ( @@ -1966,7 +1959,7 @@ def test_silence(self): self.tcpdump_rx_packets(tcpdump_txt, 0) self.assertTrue( self.tcpdump_rx_packets(tcpdump_txt, 0), - msg='got unexpected packet from test switch: %s' % tcpdump_txt) + msg=f'got unexpected packet from test switch: {tcpdump_txt}') class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest): @@ -2160,7 +2153,7 @@ def _start_gauge_check(self): self.server_thread.start() return None except socket.error as err: - return 'cannot start Influx test server: %s' % err + return f'cannot start Influx test server: {err}' def test_untagged(self): self.ping_all_when_learned() @@ -2734,8 +2727,8 @@ def test_untagged(self): first_host, second_host = self.hosts_name_ordered()[:2] self.ping_all_when_learned() for i in range(10, 10 + (self.MAX_HOSTS * 2)): - mac_intf = 'mac%u' % i - mac_ipv4 = '10.0.0.%u' % i + 
mac_intf = f'mac{i}' + mac_ipv4 = f'10.0.0.{i}' self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4) ping_cmd = mininet_test_util.timeout_cmd( 'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % ( @@ -2794,15 +2787,15 @@ def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts): self.TIMEOUT / 2) for _ in range(3): fping_out = first_host.cmd(fping_cmd) - self.assertTrue(fping_out, msg='fping did not complete: %s' % fping_cmd) + self.assertTrue(fping_out, msg=f'fping did not complete: {fping_cmd}') macs_learned = self.hosts_learned(hosts) if len(macs_learned) == len(hosts): return time.sleep(1) first_host_diag = first_host.cmd('ifconfig -a ; arp -an') second_host_diag = second_host.cmd('ifconfig -a ; arp -an') - self.fail('%s cannot be learned (%s != %s)\nfirst host %s\nsecond host %s\n' % ( - mac_ips, macs_learned, fping_out, first_host_diag, second_host_diag)) + self.fail(f'{mac_ips} cannot be learned ({macs_learned} != {fping_out})\n' \ + f'first host {first_host_diag}\nsecond host {second_host_diag}\n') def test_untagged(self): first_host, second_host = self.hosts_name_ordered()[:2] @@ -2815,9 +2808,9 @@ def add_macvlans(base, count): mac_ips = [] learned_mac_ports = {} for i in range(base, base + count): - mac_intf = 'mac%u' % i + mac_intf = f'mac{i}' mac_intfs.append(mac_intf) - mac_ipv4 = '10.0.0.%u' % i + mac_ipv4 = f'10.0.0.{i}' self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4) macvlan_mac = self.get_mac_of_intf(mac_intf, second_host) learned_mac_ports[macvlan_mac] = self.port_map['port_2'] @@ -2826,7 +2819,7 @@ def add_macvlans(base, count): def down_macvlans(macvlans): for macvlan in macvlans: - second_host.cmd('ip link set dev %s down' % macvlan) + second_host.cmd(f'ip link set dev {macvlan} down') def learn_then_down_hosts(base, count): mac_intfs, mac_ips, learned_mac_ports = add_macvlans(base, count) @@ -2851,7 +2844,7 @@ def learn_then_down_hosts(base, count): break time.sleep(1) - self.assertFalse(learned_macs, msg='MACs did not 
expire: %s' % learned_macs) + self.assertFalse(learned_macs, msg=f'MACs did not expire: {learned_macs}') self.assertTrue(before_expiry_learned_macs) for mac in before_expiry_learned_macs: @@ -2916,7 +2909,7 @@ def _max_hosts(): # pylint: disable=no-method-argument,no-self-use def test_untagged(self): test_net = ipaddress.IPv4Network( - '%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX)) + f'{self.TEST_IPV4_NET}/{self.TEST_IPV4_PREFIX}') learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4) self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS) @@ -2962,7 +2955,7 @@ def _max_hosts(): # pylint: disable=no-method-argument,no-self-use def test_untagged(self): test_net = ipaddress.IPv4Network( - '%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX)) + f'{self.TEST_IPV4_NET}/{self.TEST_IPV4_PREFIX}') learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4) self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS) @@ -2983,7 +2976,7 @@ def _configure_count_with_retry(self, expected_count): time.sleep(1) self.assertEqual( counts, expected, - 'Controller configure counts %s != expected counts %s' % (counts, expected)) + f'Controller configure counts {counts} != expected counts {expected}') def test_untagged(self): """Test that FAUCET receives HUP signal and keeps switching.""" @@ -3057,7 +3050,7 @@ def setUp(self): super().setUp() self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt') self.CONFIG = '\n'.join( - (self.CONFIG, 'include:\n - %s' % self.acl_config_file)) + (self.CONFIG, f'include:\n - {self.acl_config_file}')) with open(self.acl_config_file, 'w', encoding='utf-8') as acf: acf.write(self.START_ACL_CONFIG) self.topo = self.topo_class( @@ -3080,18 +3073,18 @@ def _push_tuples(self, eth_type, host_ips): 'ip_proto': 6, 'tcp_src': port, 'tcp_dst': port, - 'ipv%u_src' % host_ip.version: ip_match, - 'ipv%u_dst' % host_ip.version: ip_match, + f'ipv{host_ip.version}_src': ip_match, + f'ipv{host_ip.version}_dst': ip_match, 'actions': {'allow': 1}, } 
rules_yaml.append({'rule': rule_yaml}) yaml_acl_conf = {'acls': {1: {'exact_match': True, 'rules': rules_yaml}}} - tuple_txt = '%u IPv%u tuples\n' % (len(rules_yaml), host_ip.version) - error('pushing %s' % tuple_txt) + tuple_txt = f'{len(rules_yaml)} IPv{host_ip.version} tuples\n' + error(f'pushing {tuple_txt}') self.reload_conf( yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error restart=True, cold_start=False) - error('pushed %s' % tuple_txt) + error(f'pushed {tuple_txt}') self.wait_until_matching_flow( {'tp_src': port, 'ip_proto': 6, 'dl_type': eth_type}, table_id=0) rules *= 2 @@ -3243,11 +3236,11 @@ def setUp(self): super().setUp() self.ACL_COOKIE = random.randint(1, 2**16 - 1) self.ACL = self.ACL.replace('COOKIE', str(self.ACL_COOKIE)) - self.acl_config_file = '%s/acl.yaml' % self.tmpdir + self.acl_config_file = f'{self.tmpdir}/acl.yaml' with open(self.acl_config_file, 'w', encoding='utf-8') as config_file: config_file.write(self.ACL) self.CONFIG = '\n'.join( - (self.CONFIG, 'include:\n - %s' % self.acl_config_file)) + (self.CONFIG, f'include:\n - {self.acl_config_file}')) self.topo = self.topo_class( self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid], n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED, @@ -3335,7 +3328,7 @@ def test_port_change_vlan(self): restart=False, cold_start=False) self.wait_until_matching_flow( {'vlan_vid': 200}, table_id=self._ETH_SRC_TABLE, - actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE]) + actions=['OUTPUT:CONTROLLER', f'GOTO_TABLE:{self._ETH_DST_TABLE}']) self.change_port_config( self.port_map['port_2'], 'native_vlan', 200, restart=True, cold_start=False) @@ -3362,7 +3355,7 @@ def test_port_change_acl(self): table_id=self._PORT_ACL_TABLE, cookie=self.ACL_COOKIE) self.wait_until_matching_flow( {'vlan_vid': 100}, table_id=self._ETH_SRC_TABLE, - actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE]) + actions=['OUTPUT:CONTROLLER', 
f'GOTO_TABLE:{self._ETH_DST_TABLE}']) self.verify_tp_dst_blocked(5001, first_host, second_host) self.verify_tp_dst_notblocked(5002, first_host, second_host) self.reload_conf( @@ -3479,7 +3472,7 @@ def test_port_change_vlan(self): restart=False, cold_start=False) self.wait_until_matching_flow( {'vlan_vid': 200}, table_id=self._ETH_SRC_TABLE, - actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE]) + actions=['OUTPUT:CONTROLLER', f'GOTO_TABLE:{self._ETH_DST_TABLE}']) self.change_port_config( self.port_map['port_2'], 'native_vlan', 200, restart=True, cold_start=False) @@ -3824,7 +3817,7 @@ def test_untagged(self): if not self.get_matching_flow(match, table_id=table): return time.sleep(1) - self.fail('host route %s still present' % match) + self.fail(f'host route {match} still present') class FaucetUntaggedRestBcastIPv4RouteTest(FaucetUntaggedIPv4RouteTest): @@ -4013,8 +4006,8 @@ def test_untagged(self): coprocessor_host, first_host, second_host, _ = self.hosts_name_ordered() self.one_ipv4_ping(first_host, second_host.IP()) tcpdump_filter = ' and '.join(( - 'ether dst %s' % first_host.MAC(), - 'ether src %s' % coprocessor_host.MAC(), + f'ether dst {first_host.MAC()}', + f'ether src {coprocessor_host.MAC()}', 'icmp')) cmds = [ lambda: coprocessor_host.cmd( @@ -4065,7 +4058,7 @@ def setUp(self): def total_port_bans(self): total_bans = 0 for i in range(self.LINKS_PER_HOST * self.N_UNTAGGED): - port_labels = self.port_labels(self.port_map['port_%u' % (i + 1)]) + port_labels = self.port_labels(self.port_map[f'port_{i + 1}']) total_bans += self.scrape_prometheus_var( 'port_learn_bans', port_labels, dpid=True, default=0) return total_bans @@ -4091,13 +4084,13 @@ def test_untagged(self): 'brctl setfd br-loop1 0', 'ip link set br-loop1 up', 'brctl addif br-loop1 veth-loop1', - 'brctl addif br-loop1 %s-eth0' % second_host.name, + f'brctl addif br-loop1 {second_host.name}-eth0', # Connect other leg of veth pair. 
'brctl addbr br-loop2', 'brctl setfd br-loop2 0', 'ip link set br-loop2 up', 'brctl addif br-loop2 veth-loop2', - 'brctl addif br-loop2 %s-eth1' % second_host.name)) + f'brctl addif br-loop2 {second_host.name}-eth1')) # Flood some traffic into the loop for _ in range(3): @@ -4245,14 +4238,14 @@ def get_lacp_port_id(port): port priority: 2 port number: %d port state: 62 -""".strip() % tuple(get_lacp_port_id(self.port_map['port_%u' % i]) for i in lag_ports) +""".strip() % tuple(get_lacp_port_id(self.port_map[f'port_{i}']) for i in lag_ports) lacp_timeout = 5 def prom_lacp_up_ports(): lacp_up_ports = 0 for lacp_port in lag_ports: - port_labels = self.port_labels(self.port_map['port_%u' % lacp_port]) + port_labels = self.port_labels(self.port_map[f'port_{lacp_port}']) lacp_state = self.scrape_prometheus_var('port_lacp_state', port_labels, default=0) lacp_up_ports += 1 if lacp_state == 3 else 0 return lacp_up_ports @@ -4275,12 +4268,11 @@ def require_linux_bond_up(): time.sleep(1) self.assertTrue( re.search(synced_state_txt, result), - msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % ( - result, synced_state_txt)) + msg=f'LACP did not synchronize: {result}\n\nexpected:\n\n{synced_state_txt}') # Start with ports down. 
for port in lag_ports: - self.set_port_down(self.port_map['port_%u' % port]) + self.set_port_down(self.port_map[f'port_{port}']) require_lag_up_ports(0) orig_ip = first_host.IP() switch = self.first_switch() @@ -4288,45 +4280,45 @@ def require_linux_bond_up(): # Deconfigure bond members for bond_member in bond_members: self.quiet_commands(first_host, ( - 'ip link set %s down' % bond_member, - 'ip address flush dev %s' % bond_member)) + f'ip link set {bond_member} down', + f'ip address flush dev {bond_member}')) # Configure bond interface self.quiet_commands(first_host, ( - ('ip link add %s address 0e:00:00:00:00:99 ' - 'type bond mode 802.3ad lacp_rate fast miimon 100') % bond, - 'ip add add %s/24 dev %s' % (orig_ip, bond), - 'ip link set %s up' % bond)) + f'ip link add {bond} address 0e:00:00:00:00:99 ' \ + f'type bond mode 802.3ad lacp_rate fast miimon 100', + f'ip add add {orig_ip}/24 dev {bond}', + f'ip link set {bond} up')) # Add bond members for bond_member in bond_members: self.quiet_commands(first_host, ( - 'ip link set dev %s master %s' % (bond_member, bond),)) + f'ip link set dev {bond_member} master {bond}',)) for _flaps in range(2): # All ports down. for port in lag_ports: - self.set_port_down(self.port_map['port_%u' % port]) + self.set_port_down(self.port_map[f'port_{port}']) require_lag_up_ports(0) # Pick a random port to come up. up_port = random.choice(lag_ports) - self.set_port_up(self.port_map['port_%u' % up_port]) + self.set_port_up(self.port_map[f'port_{up_port}']) require_lag_up_ports(1) # We have connectivity with only one port. self.one_ipv4_ping( first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5) for port in lag_ports: - self.set_port_up(self.port_map['port_%u' % port]) + self.set_port_up(self.port_map[f'port_{port}']) # We have connectivity with two ports. 
require_lag_up_ports(2) require_linux_bond_up() self.one_ipv4_ping( first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5) # We have connectivity if that random port goes down. - self.set_port_down(self.port_map['port_%u' % up_port]) + self.set_port_down(self.port_map[f'port_{up_port}']) require_lag_up_ports(1) self.one_ipv4_ping( first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5) for port in lag_ports: - self.set_port_up(self.port_map['port_%u' % port]) + self.set_port_up(self.port_map[f'port_{port}']) class FaucetUntaggedIPv4LACPMismatchTest(FaucetUntaggedIPv4LACPTest): @@ -4338,15 +4330,15 @@ def test_untagged(self): switch = self.first_switch() bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)] for i, bond_member in enumerate(bond_members): - bond = 'bond%u' % i + bond = f'bond{i}' self.quiet_commands(first_host, ( - 'ip link set %s down' % bond_member, - 'ip address flush dev %s' % bond_member, + f'ip link set {bond_member} down', + f'ip address flush dev {bond_member}', ('ip link add %s address 0e:00:00:00:00:%2.2x ' 'type bond mode 802.3ad lacp_rate fast miimon 100') % (bond, i * 2 + i), - 'ip add add %s/24 dev %s' % (orig_ip, bond), - 'ip link set %s up' % bond, - 'ip link set dev %s master %s' % (bond_member, bond))) + f'ip add add {orig_ip}/24 dev {bond}', + f'ip link set {bond} up', + f'ip link set dev {bond_member} master {bond}')) self.wait_until_matching_lines_from_faucet_log_files(r'.+actor system mismatch.+') @@ -4379,8 +4371,7 @@ def test_fuzz_controller(self): fuzz_template % ('fuzz(%s(pdst=\'%s\'))' % ('ARP', self.FAUCET_VIPV4.ip), packets)): fuzz_out = first_host.cmd(mininet_test_util.timeout_cmd(fuzz_cmd, 180)) self.assertTrue( - re.search('Sent %u packets' % packets, fuzz_out), msg='%s: %s' % ( - fuzz_cmd, fuzz_out)) + re.search(f'Sent {packets} packets', fuzz_out), msg=f'{fuzz_cmd}: {fuzz_out}') self.one_ipv4_controller_ping(first_host) def 
test_flap_ping_controller(self): @@ -4435,12 +4426,12 @@ def test_ndisc6(self): for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'): self.assertEqual( self.FAUCET_MAC.upper(), - first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip()) + first_host.cmd(f'ndisc6 -q {vip} {first_host.defaultIntf()}').strip()) def test_rdisc6(self): first_host = self.hosts_name_ordered()[0] rdisc6_results = sorted(list(set(first_host.cmd( - 'rdisc6 -q %s' % first_host.defaultIntf()).splitlines()))) + f'rdisc6 -q {first_host.defaultIntf()}').splitlines()))) self.assertEqual( ['fc00::1:0/112', 'fc00::2:0/112'], rdisc6_results) @@ -4449,7 +4440,7 @@ def test_ra_advertise(self): first_host = self.hosts_name_ordered()[0] tcpdump_filter = ' and '.join(( 'ether dst 33:33:00:00:00:01', - 'ether src %s' % self.FAUCET_MAC, + f'ether src {self.FAUCET_MAC}', 'icmp6', 'ip6[40] == 134', 'ip6 host fe80::1:254')) @@ -4463,20 +4454,20 @@ def test_ra_advertise(self): r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC): self.assertTrue( re.search(ra_required, tcpdump_txt), - msg='%s: %s' % (ra_required, tcpdump_txt)) + msg=f'{ra_required}: {tcpdump_txt}') def test_rs_reply(self): first_host = self.hosts_name_ordered()[0] tcpdump_filter = ' and '.join(( - 'ether src %s' % self.FAUCET_MAC, - 'ether dst %s' % first_host.MAC(), + f'ether src {self.FAUCET_MAC}', + f'ether dst {first_host.MAC()}', 'icmp6', 'ip6[40] == 134', 'ip6 host fe80::1:254')) tcpdump_txt = self.tcpdump_helper( first_host, tcpdump_filter, [ lambda: first_host.cmd( - 'rdisc6 -1 %s' % first_host.defaultIntf())], + f'rdisc6 -1 {first_host.defaultIntf()}')], timeout=30, vflags='-vv', packets=1) for ra_required in ( r'fe80::1:254 > fe80::.+ICMP6, router advertisement', @@ -4485,7 +4476,7 @@ def test_rs_reply(self): r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC): self.assertTrue( re.search(ra_required, tcpdump_txt), - msg='%s: %s (%s)' % (ra_required, tcpdump_txt, 
tcpdump_filter)) + msg=f'{ra_required}: {tcpdump_txt} ({tcpdump_filter})') class FaucetUntaggedIPv6ControlPlaneFuzzTest(FaucetUntaggedTest): @@ -4521,7 +4512,7 @@ def test_fuzz_controller(self): abort = False def note(*args): - error('%s:' % self._test_name(), *args + tuple('\n')) + error(f'{self._test_name()}:', *args + tuple('\n')) # Some of these tests have been slowing down and timing out, # So this code is intended to allow some debugging and analysis @@ -4541,7 +4532,7 @@ def note(*args): abort = True break popen.wait() - if 'Sent %u packets' % packets in out: + if f'Sent {packets} packets' in out: count += packets elapsed = time.time() - start note('sent', packets, fuzz_class, 'packets in %.2fs' % elapsed) @@ -5101,19 +5092,19 @@ def test_untagged(self): second_host, tcpdump_filter, [ lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) tcpdump_txt = self.tcpdump_helper( third_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')), + f'arp -s {third_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % third_host.IP(), tcpdump_txt)) + f'{third_host.IP()}: ICMP echo request', tcpdump_txt)) tcpdump_txt = self.tcpdump_helper( fourth_host, tcpdump_filter, [ lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))]) self.assertFalse(re.search( - '%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt)) + f'{fourth_host.IP()}: ICMP echo request', tcpdump_txt)) class FaucetMultiOrderedOutputTest(FaucetUntaggedTest): @@ -5150,19 +5141,19 @@ def test_untagged(self): second_host, tcpdump_filter, [ lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % 
second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) tcpdump_txt = self.tcpdump_helper( third_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')), + f'arp -s {third_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % third_host.IP(), tcpdump_txt)) + f'{third_host.IP()}: ICMP echo request', tcpdump_txt)) tcpdump_txt = self.tcpdump_helper( fourth_host, tcpdump_filter, [ lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))]) self.assertFalse(re.search( - '%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt)) + f'{fourth_host.IP()}: ICMP echo request', tcpdump_txt)) class FaucetUntaggedOutputTest(FaucetUntaggedTest): @@ -5204,10 +5195,10 @@ def test_untagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( 'vlan 123', tcpdump_txt)) @@ -5251,10 +5242,10 @@ def test_untagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( 'vlan 123', tcpdump_txt)) @@ -5298,10 +5289,10 @@ def test_untagged(self): tcpdump_txt = self.tcpdump_helper( second_host, 
tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( 'vlan 456.+vlan 123', tcpdump_txt)) @@ -5345,10 +5336,10 @@ def test_untagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))]) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( 'vlan 456.+vlan 123', tcpdump_txt)) @@ -5392,11 +5383,11 @@ def test_untagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], packets=1) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt), msg=tcpdump_txt) self.assertTrue(re.search( 'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt) @@ -5440,11 +5431,11 @@ def test_untagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], packets=1) self.assertTrue(re.search( - '%s: ICMP echo request' % 
second_host.IP(), tcpdump_txt), msg=tcpdump_txt) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt), msg=tcpdump_txt) self.assertTrue(re.search( 'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt) @@ -5642,7 +5633,7 @@ def test_tagged(self): self.wait_until_matching_flow( {'vlan_vid': 100, 'in_port': port}, table_id=self._VLAN_TABLE, - actions=['GOTO_TABLE:%u' % self._ETH_SRC_TABLE]) + actions=[f'GOTO_TABLE:{self._ETH_SRC_TABLE}']) self.change_port_config( self.port_map['port_3'], 'mirror', None, restart=True, cold_start=False) @@ -5650,7 +5641,7 @@ def test_tagged(self): self.wait_until_matching_flow( {'vlan_vid': 100, 'in_port': port}, table_id=self._VLAN_TABLE, - actions=['GOTO_TABLE:%u' % self._ETH_SRC_TABLE]) + actions=[f'GOTO_TABLE:{self._ETH_SRC_TABLE}']) class FaucetTaggedVLANPCPTest(FaucetTaggedTest): @@ -5690,16 +5681,16 @@ def test_tagged(self): first_host, second_host = self.hosts_name_ordered()[:2] self.quiet_commands( first_host, - ['ip link set %s type vlan egress %u:1' % ( - first_host.defaultIntf(), i) for i in range(0, 8)]) + [f'ip link set {first_host.defaultIntf()} type vlan egress {i}:1' + for i in range(0, 8)]) self.one_ipv4_ping(first_host, second_host.IP()) self.wait_nonzero_packet_count_flow( {'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE) - tcpdump_filter = 'ether dst %s' % second_host.MAC() + tcpdump_filter = f'ether dst {second_host.MAC()}' tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1) + f'ping -c3 {second_host.IP()}')], root_intf=True, packets=1) self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt)) @@ -5740,16 +5731,16 @@ def test_tagged(self): first_host, second_host = self.hosts_name_ordered()[:2] self.quiet_commands( first_host, - ['ip link set %s type vlan egress %u:1' % ( - first_host.defaultIntf(), i) for i in range(0, 8)]) + [f'ip link set {first_host.defaultIntf()} type 
vlan egress {i}:1' + for i in range(0, 8)]) self.one_ipv4_ping(first_host, second_host.IP()) self.wait_nonzero_packet_count_flow( {'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE) - tcpdump_filter = 'ether dst %s' % second_host.MAC() + tcpdump_filter = f'ether dst {second_host.MAC()}' tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1) + f'ping -c3 {second_host.IP()}')], root_intf=True, packets=1) self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt)) @@ -5772,7 +5763,7 @@ def global_vid(): # pylint: disable=no-method-argument,no-self-use @staticmethod def netbase(vid, host): - return ipaddress.ip_interface('192.168.%u.%u' % (vid, host)) + return ipaddress.ip_interface(f'192.168.{vid}.{host}') def fping(self, macvlan_int, ipg): return 'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % ( @@ -5785,7 +5776,7 @@ def macvlan_ping(self, host, ipa, macvlan_int): return self.one_ipv4_ping(host, ipa, intf=macvlan_int) def run_ip(self, args): - return 'ip -%u %s' % (self.IPV, args) + return f'ip -{self.IPV} {args}' CONFIG_GLOBAL = """ routers: @@ -5833,31 +5824,30 @@ def configure_mesh(self, first_host, second_host): for i, host in enumerate(hosts, start=1): setup_commands = [] for vid in self.NEW_VIDS: - vlan_int = '%s.%u' % (host.intf_root_name, vid) - macvlan_int = 'macvlan%u' % vid + vlan_int = f'{host.intf_root_name}.{vid}' + macvlan_int = f'macvlan{vid}' ipa = self.netbase(vid, i) ipg = self.netbase(vid, 254) ipd = self.netbase(vid, 253) required_ipds.add(str(ipd.ip)) ipd_to_macvlan[str(ipd.ip)] = (macvlan_int, host) setup_commands.extend([ - self.run_ip('link add link %s name %s type vlan id %u' % ( - host.intf_root_name, vlan_int, vid)), - self.run_ip('link set dev %s up' % vlan_int), - self.run_ip('link add %s link %s type macvlan mode vepa' % (macvlan_int, vlan_int)), - self.run_ip('link set dev %s up' % macvlan_int), - self.run_ip('address 
add %s/%u dev %s' % (ipa.ip, self.NETPREFIX, macvlan_int)), - self.run_ip('route add default via %s table %u' % (ipg.ip, vid)), - self.run_ip('rule add from %s table %u priority 100' % (ipa, vid)), + self.run_ip(f'link add link {host.intf_root_name} name {vlan_int} type vlan id {vid}'), + self.run_ip(f'link set dev {vlan_int} up'), + self.run_ip(f'link add {macvlan_int} link {vlan_int} type macvlan mode vepa'), + self.run_ip(f'link set dev {macvlan_int} up'), + self.run_ip(f'address add {ipa.ip}/{self.NETPREFIX} dev {macvlan_int}'), + self.run_ip(f'route add default via {ipg.ip} table {vid}'), + self.run_ip(f'rule add from {ipa} table {vid} priority 100'), # stimulate learning attempts for down host. - self.run_ip('neigh add %s lladdr %s dev %s' % (ipd.ip, self.FAUCET_MAC, macvlan_int))]) + self.run_ip(f'neigh add {ipd.ip} lladdr {self.FAUCET_MAC} dev {macvlan_int}')]) # next host routes via FAUCET for other host in same connected subnet # to cause routing to be exercised. for j, _ in enumerate(hosts, start=1): if j != i: other_ip = self.netbase(vid, j) setup_commands.append( - self.run_ip('route add %s via %s table %u' % (other_ip, ipg.ip, vid))) + self.run_ip(f'route add {other_ip} via {ipg.ip} table {vid}')) for ipa in (ipg.ip, ipd.ip): setup_commands.append(self.fping(macvlan_int, ipa)) @@ -5884,7 +5874,7 @@ def verify_drop_rules(self, required_ipds, ipd_to_macvlan): macvlan_int, host = ipd_to_macvlan[ipd] host.cmd(self.fping(macvlan_int, ipd)) time.sleep(1) - self.assertFalse(required_ipds, msg='no drop rules for %s' % required_ipds) + self.assertFalse(required_ipds, msg=f'no drop rules for {required_ipds}') def verify_routing_performance(self, first_host, second_host): for first_host_ip, second_host_ip in ( @@ -5899,29 +5889,29 @@ def verify_routing_performance(self, first_host, second_host): def verify_l3_mesh(self, first_host, second_host): for vid in self.NEW_VIDS: - macvlan_int = 'macvlan%u' % vid + macvlan_int = f'macvlan{vid}' first_host_ip = 
self.netbase(vid, 1) second_host_ip = self.netbase(vid, 2) self.macvlan_ping(first_host, second_host_ip.ip, macvlan_int) self.macvlan_ping(second_host, first_host_ip.ip, macvlan_int) def verify_l3_hairpin(self, first_host): - macvlan1_int = 'macvlan%u' % self.NEW_VIDS[0] - macvlan2_int = 'macvlan%u' % self.NEW_VIDS[1] + macvlan1_int = f'macvlan{self.NEW_VIDS[0]}' + macvlan2_int = f'macvlan{self.NEW_VIDS[1]}' macvlan2_ip = self.netbase(self.NEW_VIDS[1], 1) macvlan1_gw = self.netbase(self.NEW_VIDS[0], 254) macvlan2_gw = self.netbase(self.NEW_VIDS[1], 254) netns = self.hostns(first_host) setup_cmds = [] setup_cmds.extend( - [self.run_ip('link set %s netns %s' % (macvlan2_int, netns))]) + [self.run_ip(f'link set {macvlan2_int} netns {netns}')]) for exec_cmd in ( - (self.run_ip('address add %s/%u dev %s' % (macvlan2_ip.ip, self.NETPREFIX, macvlan2_int)), - self.run_ip('link set %s up' % macvlan2_int), - self.run_ip('route add default via %s' % macvlan2_gw.ip))): - setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd)) + (self.run_ip(f'address add {macvlan2_ip.ip}/{self.NETPREFIX} dev {macvlan2_int}'), + self.run_ip(f'link set {macvlan2_int} up'), + self.run_ip(f'route add default via {macvlan2_gw.ip}'))): + setup_cmds.append(f'ip netns exec {netns} {exec_cmd}') setup_cmds.append( - self.run_ip('route add %s via %s' % (macvlan2_ip, macvlan1_gw.ip))) + self.run_ip(f'route add {macvlan2_ip} via {macvlan1_gw.ip}')) self.quiet_commands(first_host, setup_cmds) self.macvlan_ping(first_host, macvlan2_ip.ip, macvlan1_int) @@ -5954,7 +5944,7 @@ def global_vid(): # pylint: disable=no-method-argument,no-self-use NEW_VIDS = VIDS[1:] def netbase(self, vid, host): - return ipaddress.ip_interface('fc00::%u:%u' % (vid, host)) + return ipaddress.ip_interface(f'fc00::{vid}:{host}') def fib_table(self): return self._IPV6_FIB_TABLE @@ -5967,7 +5957,7 @@ def macvlan_ping(self, host, ipa, macvlan_int): return self.one_ipv6_ping(host, ipa, intf=macvlan_int) def run_ip(self, args): - 
return 'ip -%u %s' % (self.IPV, args) + return f'ip -{self.IPV} {args}' CONFIG_GLOBAL = """ routers: @@ -6038,21 +6028,20 @@ def test_tagged(self): for host in self.hosts_name_ordered(): setup_commands = [] for vid in self.NEW_VIDS: - vlan_int = '%s.%u' % (host.intf_root_name, vid) + vlan_int = f'{host.intf_root_name}.{vid}' setup_commands.extend([ - 'ip link add link %s name %s type vlan id %u' % ( - host.intf_root_name, vlan_int, vid), - 'ip link set dev %s up' % vlan_int]) + f'ip link add link {host.intf_root_name} name {vlan_int} type vlan id {vid}', + f'ip link set dev {vlan_int} up']) self.quiet_commands(host, setup_commands) for host in self.hosts_name_ordered(): rdisc6_commands = [] for vid in self.NEW_VIDS: - vlan_int = '%s.%u' % (host.intf_root_name, vid) + vlan_int = f'{host.intf_root_name}.{vid}' rdisc6_commands.append( 'rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int) self.quiet_commands(host, rdisc6_commands) for vlan in self.NEW_VIDS: - vlan_int = '%s.%u' % (host.intf_root_name, vid) + vlan_int = f'{host.intf_root_name}.{vid}' for _ in range(3): for host in self.hosts_name_ordered(): self.quiet_commands( @@ -6065,7 +6054,7 @@ def test_tagged(self): time.sleep(1) self.assertGreater( vlan_hosts_learned, 1, - msg='not all VLAN %u hosts learned (%u)' % (vlan, vlan_hosts_learned)) + msg=f'not all VLAN {vlan} hosts learned ({vlan_hosts_learned})') class FaucetTaggedBroadcastTest(FaucetTaggedTest): @@ -6131,7 +6120,7 @@ class FaucetTaggedWithUntaggedTest(FaucetTaggedTest): def test_tagged(self): self.ping_all_when_learned() native_ips = [ - ipaddress.ip_interface('10.99.99.%u/24' % (i + 1)) for i in range(len(self.hosts_name_ordered()))] + ipaddress.ip_interface(f'10.99.99.{i + 1}/24') for i in range(len(self.hosts_name_ordered()))] for native_ip, host in zip(native_ips, self.hosts_name_ordered()): self.host_ipv4_alias(host, native_ip, intf=host.intf_root_name) for own_native_ip, host in zip(native_ips, self.hosts_name_ordered()): @@ -6180,11 +6169,11 @@ def 
test_acl(tcpdump_host, tcpdump_filter): tcpdump_txt = self.tcpdump_helper( tcpdump_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], root_intf=True) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( tcpdump_filter, tcpdump_txt)) @@ -6234,11 +6223,11 @@ def test_acl(tcpdump_host, tcpdump_filter): tcpdump_txt = self.tcpdump_helper( tcpdump_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], root_intf=True) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( tcpdump_filter, tcpdump_txt)) @@ -6288,11 +6277,11 @@ def test_tagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], root_intf=True) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( 'vlan 101', tcpdump_txt)) @@ -6337,11 +6326,11 @@ def test_tagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], root_intf=True) 
self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) self.assertTrue(re.search( 'vlan 101', tcpdump_txt)) @@ -6385,12 +6374,12 @@ def test_tagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd( ' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], packets=10, root_intf=True) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) class FaucetTaggedPopVlansOrderedOutputTest(FaucetTaggedTest): @@ -6432,12 +6421,12 @@ def test_tagged(self): tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')), + f'arp -s {second_host.IP()} 01:02:03:04:05:06'), lambda: first_host.cmd( ' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], packets=10, root_intf=True) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest): @@ -6962,10 +6951,10 @@ def test_untagged(self): self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip) self.one_ipv4_ping(first_host, second_host_ip.ip) self.one_ipv4_ping(second_host, first_host_ip.ip) - second_host.cmd('ifconfig %s down' % second_host.defaultIntf().name) + second_host.cmd(f'ifconfig {second_host.defaultIntf().name} down') expired_re = r'.+expiring dead route %s.+' % second_host_ip.ip self.wait_until_matching_lines_from_faucet_log_files(expired_re) - second_host.cmd('ifconfig %s up' % second_host.defaultIntf().name) + second_host.cmd(f'ifconfig {second_host.defaultIntf().name} up') self.add_host_route(second_host, first_host_ip, 
second_faucet_vip.ip) self.one_ipv4_ping(second_host, first_host_ip.ip) self.one_ipv4_ping(first_host, second_host_ip.ip) @@ -7680,31 +7669,30 @@ def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use def test_untagged(self): first_host, second_host = self.hosts_name_ordered()[0:2] # we expect to see the rewritten mac address. - tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC) + tcpdump_filter = (f'icmp and ether dst {self.REWRITE_MAC}') tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)), + f'arp -s {second_host.IP()} {self.OVERRIDE_MAC}'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], timeout=5, packets=1) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host): overridden_host.setMAC(self.OVERRIDE_MAC) rewrite_host.setMAC(self.REWRITE_MAC) - rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC())) + rewrite_host.cmd(f'arp -s {overridden_host.IP()} {overridden_host.MAC()}') rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP()))) self.wait_until_matching_flow( {'dl_dst': self.REWRITE_MAC}, table_id=self._ETH_DST_TABLE, - actions=['OUTPUT:%u' % self.port_map['port_3']]) - tcpdump_filter = ('icmp and ether src %s and ether dst %s' % ( - source_host.MAC(), rewrite_host.MAC())) + actions=[f'OUTPUT:{self.port_map["port_3"]}']) + tcpdump_filter = f'icmp and ether src {source_host.MAC()} and ether dst {rewrite_host.MAC()}' tcpdump_txt = self.tcpdump_helper( tcpdump_host, tcpdump_filter, [ lambda: source_host.cmd( - 'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())), + f'arp -s {rewrite_host.IP()} {overridden_host.MAC()}'), # this will fail if no reply lambda: self.one_ipv4_ping( source_host, 
rewrite_host.IP(), require_host_learned=False)], @@ -7712,7 +7700,7 @@ def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdum # ping from h1 to h2.mac should appear in third host, and not second host, as # the acl should rewrite the dst mac. self.assertFalse(re.search( - '%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt)) + f'{rewrite_host.IP()}: ICMP echo request', tcpdump_txt)) def test_switching(self): """Tests that a acl can rewrite the destination mac address, @@ -7770,31 +7758,30 @@ def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use def test_untagged(self): first_host, second_host = self.hosts_name_ordered()[0:2] # we expect to see the rewritten mac address. - tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC) + tcpdump_filter = (f'icmp and ether dst {self.REWRITE_MAC}') tcpdump_txt = self.tcpdump_helper( second_host, tcpdump_filter, [ lambda: first_host.cmd( - 'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)), + f'arp -s {second_host.IP()} {self.OVERRIDE_MAC}'), lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))], timeout=5, packets=1) self.assertTrue(re.search( - '%s: ICMP echo request' % second_host.IP(), tcpdump_txt)) + f'{second_host.IP()}: ICMP echo request', tcpdump_txt)) def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host): overridden_host.setMAC(self.OVERRIDE_MAC) rewrite_host.setMAC(self.REWRITE_MAC) - rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC())) + rewrite_host.cmd(f'arp -s {overridden_host.IP()} {overridden_host.MAC()}') rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP()))) self.wait_until_matching_flow( {'dl_dst': self.REWRITE_MAC}, table_id=self._ETH_DST_TABLE, - actions=['OUTPUT:%u' % self.port_map['port_3']]) - tcpdump_filter = ('icmp and ether src %s and ether dst %s' % ( - source_host.MAC(), rewrite_host.MAC())) + actions=[f'OUTPUT:{self.port_map["port_3"]}']) + 
tcpdump_filter = f'icmp and ether src {source_host.MAC()} and ether dst {rewrite_host.MAC()}' tcpdump_txt = self.tcpdump_helper( tcpdump_host, tcpdump_filter, [ lambda: source_host.cmd( - 'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())), + f'arp -s {rewrite_host.IP()} {overridden_host.MAC()}'), # this will fail if no reply lambda: self.one_ipv4_ping( source_host, rewrite_host.IP(), require_host_learned=False)], @@ -7802,7 +7789,7 @@ def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdum # ping from h1 to h2.mac should appear in third host, and not second host, as # the acl should rewrite the dst mac. self.assertFalse(re.search( - '%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt)) + f'{rewrite_host.IP()}: ICMP echo request', tcpdump_txt)) def test_switching(self): """Tests that a acl can rewrite the destination mac address, @@ -7880,7 +7867,7 @@ def test_set_fields_generic_udp(self): dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT, dst=self.OUTPUT_MAC) - tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC + tcpdump_filter = f"ether dst {self.OUTPUT_MAC}" tcpdump_txt = self.tcpdump_helper( dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)], root_intf=True, packets=1) @@ -7892,7 +7879,7 @@ def test_set_fields_generic_udp(self): self.IPV4_DST_VAL, self.UDP_DST_PORT), tcpdump_txt)) # check the packet's converted dscp value - self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt)) + self.assertTrue(re.search(f"tos {hex(self.NW_TOS_VAL)}", tcpdump_txt)) def test_set_fields_icmp(self): # Send a basic ICMP packet through the faucet pipeline and verify that @@ -7905,7 +7892,7 @@ def test_set_fields_icmp(self): self.SRC_MAC, source_host.defaultIntf(), source_host.IP(), dest_host.IP(), dst=self.OUTPUT_MAC) - tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC + tcpdump_filter = f"ether dst {self.OUTPUT_MAC}" tcpdump_txt = self.tcpdump_helper( dest_host, tcpdump_filter, [lambda: 
source_host.cmd(scapy_pkt)], root_intf=True, packets=1) @@ -7984,7 +7971,7 @@ def test_set_fields_generic_udp(self): dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT, dst=self.OUTPUT_MAC) - tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC + tcpdump_filter = f"ether dst {self.OUTPUT_MAC}" tcpdump_txt = self.tcpdump_helper( dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)], root_intf=True, packets=1) @@ -7996,7 +7983,7 @@ def test_set_fields_generic_udp(self): self.IPV4_DST_VAL, self.UDP_DST_PORT), tcpdump_txt)) # check the packet's converted dscp value - self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt)) + self.assertTrue(re.search(f"tos {hex(self.NW_TOS_VAL)}", tcpdump_txt)) def test_set_fields_icmp(self): # Send a basic ICMP packet through the faucet pipeline and verify that @@ -8009,7 +7996,7 @@ def test_set_fields_icmp(self): self.SRC_MAC, source_host.defaultIntf(), source_host.IP(), dest_host.IP(), dst=self.OUTPUT_MAC) - tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC + tcpdump_filter = f"ether dst {self.OUTPUT_MAC}" tcpdump_txt = self.tcpdump_helper( dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)], root_intf=True, packets=1) @@ -8082,7 +8069,7 @@ def test_untagged(self): scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184, source_host.defaultIntf()) - tcpdump_filter = "ether dst %s" % self.REWRITE_MAC + tcpdump_filter = f"ether dst {self.REWRITE_MAC}" tcpdump_txt = self.tcpdump_helper( dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)], root_intf=True, packets=1) @@ -8152,7 +8139,7 @@ def test_untagged(self): scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184, source_host.defaultIntf()) - tcpdump_filter = "ether dst %s" % self.REWRITE_MAC + tcpdump_filter = f"ether dst {self.REWRITE_MAC}" tcpdump_txt = self.tcpdump_helper( dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)], root_intf=True, packets=1) @@ -8178,16 +8165,16 @@ def wait_for_host_removed(self, 
host, in_port, timeout=5): for _ in range(timeout): if not self.host_learned(host, in_port=in_port, timeout=1): return - self.fail('host %s still learned' % host) + self.fail(f'host {host} still learned') def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30): pattern = "OFPFlowRemoved" mac = None if src_mac: - pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac + pattern = f"OFPFlowRemoved(.*)'eth_src': '{src_mac}'" mac = src_mac if dst_mac: - pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac + pattern = f"OFPFlowRemoved(.*)'eth_dst': '{dst_mac}'" mac = dst_mac for _ in range(timeout): for _, debug_log_name in self._get_ofchannel_logs(): @@ -8196,7 +8183,7 @@ def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30): if re.search(pattern, debug): return time.sleep(1) - self.fail('Not received OFPFlowRemoved for host %s' % mac) + self.fail(f'Not received OFPFlowRemoved for host {mac}') def wait_for_host_log_msg(self, host_mac, msg): host_log_re = r'.*%s %s.*' % (msg, host_mac) @@ -8223,7 +8210,7 @@ def test_untagged(self): self.ping_all_when_learned() first_host, second_host, third_host, fourth_host = self.hosts_name_ordered() self.host_ipv4_alias(first_host, ipaddress.ip_interface('10.99.99.1/24')) - first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC())) + first_host.cmd(f'arp -s {second_host.IP()} {second_host.MAC()}') first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP()) for host in (second_host, third_host, fourth_host): self.host_drop_all_ips(host) @@ -8346,7 +8333,7 @@ def bad_flow_mod(self): options = random.sample(self.bad_options, random.randint(2, len(self.bad_options))) for option in options: - param = getattr(self, 'bad_%s' % option)() + param = getattr(self, f'bad_{option}')() flow_mod.update(param) return flow_mod @@ -8361,7 +8348,7 @@ def tearDown(self, ignore_oferrors=True): oferrors = super().tearDown(ignore_oferrors) oferrors = re.findall(r'type: (\w+)', oferrors) 
counter = collections.Counter(oferrors) - error('Ignored OF error count: %s\n' % dict(counter)) + error(f'Ignored OF error count: {dict(counter)}\n') # TODO: ensure at least one error is always generated. # pylint: disable=arguments-differ @@ -8401,7 +8388,7 @@ def setUp(self): if self.config and self.config.get('hw_switch', False): self.N_UNTAGGED = min(len(self.config['dp_ports']), self.N_UNTAGGED) - error('(%d ports) ' % self.N_UNTAGGED) + error(f'({self.N_UNTAGGED} ports) ') super().setUp() From 21c53ee2bf296663e30195240dd183c2f0425697 Mon Sep 17 00:00:00 2001 From: cglewis Date: Tue, 21 Sep 2021 15:05:05 -0700 Subject: [PATCH 103/231] f-string updates for pylint --- clib/docker_host.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/docker_host.py b/clib/docker_host.py index 4d400155db..a35612b7f4 100644 --- a/clib/docker_host.py +++ b/clib/docker_host.py @@ -100,7 +100,7 @@ def startShell(self, mnopts=None): base_cmd = ["docker", "run", "-ti", "--privileged", "--entrypoint", "env", "-h", self.name, "--name", self.container] opt_args = [f'--net={self.network}'] - env_vars = self.env_vars + [f"TERM=dumb", "PS1={self.ps1}"] + env_vars = self.env_vars + ["TERM=dumb", f"PS1={self.ps1}"] env_args = reduce(operator.add, (['--env', var] for var in env_vars), []) vol_args = reduce(operator.add, (['-v', var] for var in self.vol_maps), ['-v', tmp_volume]) image_args = [self.image, "bash", "--norc", "-is", "mininet:" + self.name] From 47d46a56b47cd9b58e56cbd7e40d7936a229f6dd Mon Sep 17 00:00:00 2001 From: cglewis Date: Tue, 21 Sep 2021 16:02:16 -0700 Subject: [PATCH 104/231] fixes for stickler --- clib/fakeoftable.py | 3 ++- clib/mininet_test_base.py | 9 +++++---- faucet/dp.py | 4 ++-- faucet/valve.py | 17 +++++++++-------- faucet/valve_lldp.py | 8 ++++---- faucet/valve_switch_standalone.py | 30 +++++++++++++++--------------- faucet/valve_table.py | 4 ++-- tests/integration/mininet_tests.py | 12 ++++++------ 8 files changed, 45 insertions(+), 42 
deletions(-) diff --git a/clib/fakeoftable.py b/clib/fakeoftable.py index a8444310cd..78bf817cbf 100644 --- a/clib/fakeoftable.py +++ b/clib/fakeoftable.py @@ -381,7 +381,8 @@ def _modify_strict(table, flowmod): for table in tables: entries = len(table) if entries > tfm_body.max_entries: - tfm_table_details = f'self.dp_id: table {table_id} {tfm_body.name} full ({entries}/{tfm_body.max_entries})' + tfm_table_details = f'self.dp_id: table {table_id} {tfm_body.name} ' \ + f'full ({entries}/{tfm_body.max_entries})' flow_dump = '\n\n'.join( (tfm_table_details, str(ofmsg), str(tfm_body))) raise FakeOFTableException(flow_dump) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index f521d18cb6..b24addaa09 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -1678,7 +1678,8 @@ def _update_conf(conf_path, yaml_conf): new_mac_table, msg=f'no host cache for VLAN {host_cache}') self.assertEqual( old_mac_table, new_mac_table, - msg=f'host cache for VLAN {host_cache} not same over reload (old {old_mac_table}, new {new_mac_table})') + msg=f'host cache for VLAN {host_cache} not same over reload ' + f'(old {old_mac_table}, new {new_mac_table})') else: verify_faucet_reconf_func() return @@ -1809,7 +1810,7 @@ def verify_unicast(self, hosts, unicast_expected=True, packets=3): scapy_cmd = self.scapy_template( (f'Ether(src=\'{host_a.MAC()}\', dst=\'{host_b.MAC()}\', type={IPV4_ETH}) / ' f'IP(src=\'{host_a.IP()}\', dst=\'{host_b.IP()}\') / UDP(dport=67,sport=68)'), - host_a.defaultIntf(), count=packets) + host_a.defaultIntf(), count=packets) return self._verify_xcast(unicast_expected, packets, tcpdump_filter, scapy_cmd, host_a, host_b) @@ -1827,7 +1828,7 @@ def verify_no_bcast_to_self(self, timeout=3): bcast_cap_files.append(bcast_cap_file) host.cmd(mininet_test_util.timeout_cmd( f'tcpdump -U -n -c 1 -i {host.defaultIntf()} -w {bcast_cap_file} {tcpdump_filter} &', - tcpdump_timeout)) + tcpdump_timeout)) for host in self.hosts_name_ordered(): for 
bcast_cmd in ( (f'ndisc6 -w1 fe80::1 {host.defaultIntf()}'), @@ -2785,7 +2786,7 @@ def start_wpasupplicant(self, host, wpasupplicant_conf, timeout=10, log_prefix=' if wpa_ctrl_socket_path: wpa_ctrl_socket = f'-C {wpa_ctrl_socket_path}' wpasupplicant_cmd = mininet_test_util.timeout_cmd( - f'wpa_supplicant -dd -t -c {wpasupplicant_conf_file_name}' \ + f'wpa_supplicant -dd -t -c {wpasupplicant_conf_file_name}' f' -i {host.defaultIntf()} -D wired -f {wpasupplicant_log} {wpa_ctrl_socket} &', 300) host.cmd(wpasupplicant_cmd) for _ in range(timeout): diff --git a/faucet/dp.py b/faucet/dp.py index 1611682847..f41e44540a 100644 --- a/faucet/dp.py +++ b/faucet/dp.py @@ -996,7 +996,7 @@ def get_tunnel_vlan(tunnel_id_name, resolved_dst): if tunnel_vlan: # VLAN exists, i.e: user specified the VLAN so check if it is reserved test_config_condition(not tunnel_vlan.reserved_internal_vlan, ( - f'VLAN {tunnel_vlan.name} is required for use by' \ + f'VLAN {tunnel_vlan.name} is required for use by' f' tunnel {tunnel_id_name} but is not reserved')) else: # VLAN does not exist, so the ID should be the VID the user wants @@ -1009,7 +1009,7 @@ def get_tunnel_vlan(tunnel_id_name, resolved_dst): if existing_tunnel_vlan is not None: test_config_condition( existing_tunnel_vlan == tunnel_vlan.vid, - f'Cannot have multiple tunnel IDs ({existing_tunnel_vlan.vid},' \ + f'Cannot have multiple tunnel IDs ({existing_tunnel_vlan.vid},' f' {tunnel_vlan.vid}) to same destination {resolved_dst}') return tunnel_vlan diff --git a/faucet/valve.py b/faucet/valve.py index a2b0b98995..06a901d872 100644 --- a/faucet/valve.py +++ b/faucet/valve.py @@ -897,19 +897,20 @@ def lldp_handler(self, now, pkt_meta, other_valves): if port.dyn_lldp_beacon_recv_state != remote_port_state: chassis_id = str(self.dp.faucet_dp_mac) if remote_port_state: - self.logger.info(f'LLDP on {chassis_id}, {port} from {pkt_meta.eth_src} ' \ - f'(remote {valve_util.dpid_log(remote_dp_id)}, port {remote_port_id})' \ + self.logger.info( + 
f'LLDP on {chassis_id}, {port} from {pkt_meta.eth_src} ' + f'(remote {valve_util.dpid_log(remote_dp_id)}, port {remote_port_id})' f' state {port.stack_state_name(remote_port_state)}') port.dyn_lldp_beacon_recv_state = remote_port_state peer_mac_src = self.dp.ports[port.number].lldp_peer_mac if peer_mac_src and peer_mac_src != pkt_meta.eth_src: - self.logger.warning(f'Unexpected LLDP peer. Received pkt from {pkt_meta.eth_src} ' \ - f'instead of {peer_mac_src}') + self.logger.warning(f'Unexpected LLDP peer. Received pkt from {pkt_meta.eth_src} ' + f'instead of {peer_mac_src}') ofmsgs_by_valve = {} if remote_dp_id and remote_port_id: - self.logger.debug(f'FAUCET LLDP on {port} from {pkt_meta.eth_src} ' \ - f'(remote {valve_util.dpid_log(remote_dp_id)}, port {remote_port_id})') + self.logger.debug(f'FAUCET LLDP on {port} from {pkt_meta.eth_src} ' + f'(remote {valve_util.dpid_log(remote_dp_id)}, port {remote_port_id})') ofmsgs_by_valve.update(self._lldp_manager.verify_lldp( port, now, self, other_valves, remote_dp_id, remote_dp_name, @@ -1082,8 +1083,8 @@ def parse_pkt_meta(self, msg): and pkt_meta.vlan and pkt_meta.vlan not in pkt_meta.port.tagged_vlans and pkt_meta.vlan != pkt_meta.port.native_vlan): - self.logger.warning(f'packet from non-stack port number ' \ - f'{pkt_meta.port.number} is not member of VLAN {pkt_meta.vlan.vid}') + self.logger.warning(f'packet from non-stack port number ' + f'{pkt_meta.port.number} is not member of VLAN {pkt_meta.vlan.vid}') return None return pkt_meta diff --git a/faucet/valve_lldp.py b/faucet/valve_lldp.py index e5886dca8d..da26c35411 100644 --- a/faucet/valve_lldp.py +++ b/faucet/valve_lldp.py @@ -91,8 +91,8 @@ def verify_lldp(self, port, now, valve, other_valves, or remote_dp_name != remote_dp.name or remote_port_id != remote_port.number): self.logger.error( - f'Stack {port} cabling incorrect, expected ' \ - f'{valve_util.dpid_log(remote_dp.dp_id)}:{remote_dp.name}:{remote_port.number}, ' \ + f'Stack {port} cabling incorrect, 
expected ' + f'{valve_util.dpid_log(remote_dp.dp_id)}:{remote_dp.name}:{remote_port.number}, ' f'actual {valve_util.dpid_log(remote_dp_id)}:{remote_dp_name}:{remote_port_id}') stack_correct = False self._inc_var('stack_cabling_errors') @@ -133,8 +133,8 @@ def update_stack_link_state(self, ports, now, valve, other_valves): self.notify({'STACK_STATE': { 'port': port.number, 'state': after_state}}) - self.logger.info(f'Stack {port} state {port.stack_state_name(after_state)} ' \ - f'(previous state {port.stack_state_name(before_state)}): {reason}') + self.logger.info(f'Stack {port} state {port.stack_state_name(after_state)} ' + f'(previous state {port.stack_state_name(before_state)}): {reason}') stack_changes += 1 port_up = False if port.is_stack_up(): diff --git a/faucet/valve_switch_standalone.py b/faucet/valve_switch_standalone.py index bfbecc08a1..aee0a2a55b 100644 --- a/faucet/valve_switch_standalone.py +++ b/faucet/valve_switch_standalone.py @@ -485,8 +485,8 @@ def ban_rules(self, pkt_meta): self.eth_src_table.match(in_port=port.number))) port.dyn_learn_ban_count += 1 self.logger.info( - f'max hosts {port.max_hosts} reached on {port}, ' \ - f'temporarily banning learning on this port, ' \ + f'max hosts {port.max_hosts} reached on {port}, ' + f'temporarily banning learning on this port, ' f'and not learning {eth_src}') if vlan is not None and vlan.max_hosts: hosts_count = vlan.hosts_count() @@ -494,8 +494,8 @@ def ban_rules(self, pkt_meta): ofmsgs.append(self._temp_ban_host_learning(self.eth_src_table.match(vlan=vlan))) vlan.dyn_learn_ban_count += 1 self.logger.info( - f'max hosts {vlan.max_hosts} reached on VLAN {vlan.vid}, ' \ - f'temporarily banning learning on this VLAN, ' \ + f'max hosts {vlan.max_hosts} reached on VLAN {vlan.vid}, ' + f'temporarily banning learning on this VLAN, ' f'and not learning {eth_src} on {port}') return ofmsgs @@ -690,8 +690,8 @@ def _loop_protect_check(self, entry, vlan, now, eth_src, port, ofmsgs, # pylint if port != cache_port and 
cache_age < self.cache_update_guard_time: learn_ban = True port.dyn_learn_ban_count += 1 - self.logger.info(f'rapid move of {eth_src} from {cache_port} ' \ - f'to {port}, temp loop ban {port}') + self.logger.info(f'rapid move of {eth_src} from {cache_port} ' + f'to {port}, temp loop ban {port}') # already, or newly in protect mode, apply the ban rules. if learn_ban: @@ -773,9 +773,9 @@ def lacp_update_actor_state(self, port, lacp_up, now=None, lacp_pkt=None, cold_s lacp_up, now=now, lacp_pkt=lacp_pkt, cold_start=cold_start) if prev_actor_state != new_actor_state: - self.logger.info(f'LAG {port.lacp} {port} actor state ' \ - f'{port.actor_state_name(new_actor_state)} (previous state ' \ - f'{port.actor_state_name(prev_actor_state)})') + self.logger.info(f'LAG {port.lacp} {port} actor state ' + f'{port.actor_state_name(new_actor_state)} (previous state ' + f'{port.actor_state_name(prev_actor_state)})') return prev_actor_state != new_actor_state def enable_forwarding(self, port): @@ -808,8 +808,8 @@ def lacp_req_reply(self, lacp_pkt, port): for peer_num in port.lacp_passthrough: lacp_peer = self.ports.get(peer_num, None) if not lacp_peer.dyn_lacp_up: - self.logger.warning(f'Suppressing LACP LAG {port.lacp} on ' \ - f'{port}, peer {lacp_peer} link is down') + self.logger.warning(f'Suppressing LACP LAG {port.lacp} on ' + f'{port}, peer {lacp_peer} link is down') return [] actor_state_activity = 0 if port.lacp_active: @@ -873,9 +873,9 @@ def lacp_update_port_selection_state(self, port, valve, other_valves=None, cold_ prev_state = port.lacp_port_state() new_state = port.lacp_port_update(valve.dp.dp_id == nominated_dpid, cold_start=cold_start) if new_state != prev_state: - self.logger.info(f'LAG {port.lacp} {port} ' \ - f'{port.port_role_name(new_state)} ' \ - f'(previous state{port.port_role_name(prev_state)})') + self.logger.info(f'LAG {port.lacp} {port} ' + f'{port.port_role_name(new_state)} ' + f'(previous state{port.port_role_name(prev_state)})') return new_state != 
prev_state def lacp_handler(self, now, pkt_meta, valve, other_valves, lacp_update): @@ -925,7 +925,7 @@ def lacp_handler(self, now, pkt_meta, valve, other_valves, lacp_update): other_actor_system = other_lag_port.dyn_last_lacp_pkt.actor_system if actor_system != other_actor_system: self.logger.error( - f'LACP actor system mismatch {pkt_meta.port}: ' \ + f'LACP actor system mismatch {pkt_meta.port}: ' f'{actor_system}, {other_lag_port} {other_actor_system}') return ofmsgs_by_valve diff --git a/faucet/valve_table.py b/faucet/valve_table.py index d2cd7aeb76..ad2905319f 100644 --- a/faucet/valve_table.py +++ b/faucet/valve_table.py @@ -121,11 +121,11 @@ def _verify_flowmod(self, flowmod): config_mask = self.match_types[match_type] flow_mask = isinstance(match_field, tuple) assert config_mask or (not config_mask and not flow_mask), ( - f'{match_type} configured mask {config_mask} but flow mask ' \ + f'{match_type} configured mask {config_mask} but flow mask ' f'{flow_mask} in table {self.name} ({flowmod})') if self.exact_match and match_fields: assert len(self.match_types) == len(match_fields), ( - f'exact match table {self.name} matches {self.match_types} ' \ + f'exact match table {self.name} matches {self.match_types} ' f'do not match flow matches {match_fields} ({flowmod})') def _trim_actions(self, actions): diff --git a/tests/integration/mininet_tests.py b/tests/integration/mininet_tests.py index 5bd20b418d..6896ec0090 100644 --- a/tests/integration/mininet_tests.py +++ b/tests/integration/mininet_tests.py @@ -358,7 +358,8 @@ def insert_dynamic_values(dot1x_expected_events): for expected_event in dot1x_expected_events: self.assertTrue(expected_event in events_that_happened, - msg=f'expected event: {expected_event} not in events_that_happened {events_that_happened}') + msg=f'expected event: {expected_event} not in ' + f'events_that_happened {events_that_happened}') @staticmethod def _eapol_filter(fields): @@ -1194,7 +1195,7 @@ def test_untagged(self): 
self.wait_until_matching_flow( {'vlan_vid': radius_vid1}, table_id=self._FLOOD_TABLE, - actions=[f'POP_VLAN', f'OUTPUT:{port_no1}', f'OUTPUT:{port_no3}']) + actions=['POP_VLAN', f'OUTPUT:{port_no1}', f'OUTPUT:{port_no3}']) self.wait_until_matching_flow( {'vlan_vid': vid}, table_id=self._FLOOD_TABLE, @@ -2794,8 +2795,8 @@ def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts): time.sleep(1) first_host_diag = first_host.cmd('ifconfig -a ; arp -an') second_host_diag = second_host.cmd('ifconfig -a ; arp -an') - self.fail(f'{mac_ips} cannot be learned ({macs_learned} != {fping_out})\n' \ - f'first host {first_host_diag}\nsecond host {second_host_diag}\n') + self.fail(f'{mac_ips} cannot be learned ({macs_learned} != {fping_out})\n' + f'first host {first_host_diag}\nsecond host {second_host_diag}\n') def test_untagged(self): first_host, second_host = self.hosts_name_ordered()[:2] @@ -4284,8 +4285,7 @@ def require_linux_bond_up(): f'ip address flush dev {bond_member}')) # Configure bond interface self.quiet_commands(first_host, ( - f'ip link add {bond} address 0e:00:00:00:00:99 ' \ - f'type bond mode 802.3ad lacp_rate fast miimon 100', + f'ip link add {bond} address 0e:00:00:00:00:99 type bond mode 802.3ad lacp_rate fast miimon 100', f'ip add add {orig_ip}/24 dev {bond}', f'ip link set {bond} up')) # Add bond members From 62bf3fecb1b70a272092d37fab27fe53204a9bc1 Mon Sep 17 00:00:00 2001 From: Charlie Lewis Date: Mon, 27 Sep 2021 19:16:28 -0700 Subject: [PATCH 105/231] Update setup.py --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 97acd3a31a..e2ca068682 100755 --- a/setup.py +++ b/setup.py @@ -89,7 +89,7 @@ def setup_faucet_log(): setup( name='faucet', setup_requires=['pbr>=1.9', 'setuptools>=17.1'], - python_requires='>=3.7' + python_requires='>=3.7', pbr=True ) From 67f75f95039167e46c5deb8d461e12596adce405 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 13 Oct 2021 20:18:41 +0000 Subject: 
[PATCH 106/231] upgrade pyyaml 6. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f0128b10aa..d87e462ef3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,5 +6,5 @@ msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 prometheus_client==0.11.0 -pyyaml==5.4.1 +pyyaml==6.0 pytricia From a5005960f5893d798e13674eca92ac4442f0aaef Mon Sep 17 00:00:00 2001 From: cglewis Date: Mon, 18 Oct 2021 16:06:10 -0400 Subject: [PATCH 107/231] remove restrictions on renovate --- .renovaterc.json | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/.renovaterc.json b/.renovaterc.json index e7578d7b3f..83b161bd9b 100644 --- a/.renovaterc.json +++ b/.renovaterc.json @@ -1,16 +1,7 @@ { - "ignoreDeps": ["eventlet"], - "separateMajorMinor": false, - "schedule": [ - "after 10pm every weekday", - "before 5am every weekday", - "every weekend" - ], - "timezone": "Pacific/Auckland", "extends": [ "config:base", - ":prHourlyLimit1", - ":preserveSemverRanges", "docker:enableMajor" - ] + ], + "ignorePaths": [] } From b8be54cef1bbeeed2160de45a0277f48d69e34f1 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 18 Oct 2021 20:39:26 +0000 Subject: [PATCH 108/231] increase test shards. --- .github/workflows/tests-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-integration.yml b/.github/workflows/tests-integration.yml index 8817b7a237..25736c3dd0 100644 --- a/.github/workflows/tests-integration.yml +++ b/.github/workflows/tests-integration.yml @@ -4,7 +4,7 @@ on: [push, pull_request] env: FILES_CHANGED: "all" - MATRIX_SHARDS: 10 + MATRIX_SHARDS: 15 jobs: sanity-tests: From 2d09438044bdbf553ee427715ae6d92d5a5d7308 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 6 Oct 2021 04:33:33 +0000 Subject: [PATCH 109/231] switch to os_ken from ryu. 
--- clib/clib_mininet_test_main.py | 4 +- clib/fakeoftable.py | 8 +- clib/mininet_test_base.py | 2 +- clib/mininet_test_topo.py | 14 +- clib/valve_test_lib.py | 10 +- etc/systemd/system/gauge.service | 2 +- faucet/__main__.py | 12 +- faucet/acl.py | 2 +- faucet/faucet.py | 22 +- faucet/faucet_bgp.py | 2 +- faucet/faucet_dot1x.py | 2 +- faucet/faucet_event.py | 4 +- faucet/gauge.py | 12 +- faucet/gauge_pollers.py | 6 +- faucet/prom_client.py | 2 +- faucet/valve_of.py | 14 +- faucet/valve_packet.py | 8 +- faucet/valve_route.py | 2 +- faucet/valve_ryuapp.py | 14 +- faucet/watcher.py | 2 +- ofctl_rest/ofctl_rest.py | 753 ++++++++++++++++++ ofctl_rest/wsgi.py | 320 ++++++++ requirements.txt | 2 +- test-requirements.txt | 2 + .../fuzzer/packet/display_packet_crash.py | 2 +- tests/generative/fuzzer/packet/fuzz_packet.py | 2 +- tests/generative/unit/test_topology.py | 4 +- tests/unit/faucet/test_valve.py | 10 +- tests/unit/faucet/test_valve_config.py | 2 +- tests/unit/faucet/test_valve_egress.py | 2 +- tests/unit/faucet/test_valve_stack.py | 4 +- tests/unit/faucet/test_valveapp_smoke.py | 40 +- tests/unit/gauge/test_gauge.py | 70 +- 33 files changed, 1214 insertions(+), 143 deletions(-) create mode 100644 ofctl_rest/ofctl_rest.py create mode 100644 ofctl_rest/wsgi.py diff --git a/clib/clib_mininet_test_main.py b/clib/clib_mininet_test_main.py index 45ed8bfe6b..6ae6dbc6ea 100755 --- a/clib/clib_mininet_test_main.py +++ b/clib/clib_mininet_test_main.py @@ -60,8 +60,8 @@ EXTERNAL_DEPENDENCIES = ( - ('ryu-manager', ['--version'], - 'ryu-manager', r'ryu-manager (\d+\.\d+)\n', "4.9"), + ('osken-manager', ['--version'], + 'osken-manager', r'osken-manager(\d+\.\d+)\n', "2.1"), ('ovs-vsctl', ['--version'], 'Open vSwitch', r'ovs-vsctl\s+\(Open vSwitch\)\s+(\d+\.\d+)\.\d+\n', "2.3"), ('tcpdump', ['-h'], 'tcpdump', diff --git a/clib/fakeoftable.py b/clib/fakeoftable.py index 78bf817cbf..b4bddc7566 100644 --- a/clib/fakeoftable.py +++ b/clib/fakeoftable.py @@ -28,10 +28,10 @@ from 
bitstring import Bits -from ryu.ofproto import ofproto_v1_3 as ofp -from ryu.ofproto import ofproto_v1_3_parser as parser -from ryu.ofproto import ofproto_parser as ofp_parser -from ryu.lib import addrconv +from os_ken.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3_parser as parser +from os_ken.ofproto import ofproto_parser as ofp_parser +from os_ken.lib import addrconv CONTROLLER_PORT = 4294967293 diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index 3a3edea4ae..f7a91b08d2 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -26,7 +26,7 @@ import netaddr import requests -from ryu.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3 as ofp from mininet.link import Intf as HWIntf # pylint: disable=import-error from mininet.log import error, output # pylint: disable=import-error diff --git a/clib/mininet_test_topo.py b/clib/mininet_test_topo.py index 13bc8952ec..7abf6fdaaf 100644 --- a/clib/mininet_test_topo.py +++ b/clib/mininet_test_topo.py @@ -551,10 +551,8 @@ def _command(self, env, tmpdir, name, args): def ryu_pid(self): """Return PID of ryu-manager process.""" if os.path.exists(self.pid_file) and os.path.getsize(self.pid_file) > 0: - pid = None with open(self.pid_file, encoding='utf-8') as pid_file: - pid = int(pid_file.read()) - return pid + return int(pid_file.read()) return None def listen_port(self, port, state='LISTEN'): @@ -639,7 +637,7 @@ def stop(self): # pylint: disable=arguments-differ class FAUCET(BaseFAUCET): """Start a FAUCET controller.""" - START_ARGS = ['--ryu-app=ryu.app.ofctl_rest'] + START_ARGS = ['--ryu-app-lists=%s' % (os.path.dirname(os.path.realpath(__file__)) + '/../ofctl_rest/ofctl_rest.py')] def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env, ctl_privkey, ctl_cert, ca_certs, @@ -647,10 +645,10 @@ def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env, self.prom_port = prom_port self.ofctl_port = 
mininet_test_util.find_free_port( ports_sock, test_name) - cargs = ' '.join(( - f'--ryu-wsapi-host={mininet_test_util.LOCALHOSTV6}', - f'--ryu-wsapi-port={self.ofctl_port}', - self._tls_cargs(port, ctl_privkey, ctl_cert, ca_certs))) + env['OFCTL_PORT'] = str(self.ofctl_port) + env['OFCTL_HOST'] = mininet_test_util.LOCALHOSTV6 + cargs = ' '.join( + self._tls_cargs(port, ctl_privkey, ctl_cert, ca_certs)) super().__init__( name, tmpdir, diff --git a/clib/valve_test_lib.py b/clib/valve_test_lib.py index 5ebf3cd244..aced72e331 100644 --- a/clib/valve_test_lib.py +++ b/clib/valve_test_lib.py @@ -34,12 +34,12 @@ import unittest import yaml -from ryu.lib import mac -from ryu.lib.packet import ( +from os_ken.lib import mac +from os_ken.lib.packet import ( arp, ethernet, icmp, icmpv6, ipv4, ipv6, lldp, slow, packet, vlan) -from ryu.ofproto import ether, inet -from ryu.ofproto import ofproto_v1_3 as ofp -from ryu.ofproto import ofproto_v1_3_parser as parser +from os_ken.ofproto import ether, inet +from os_ken.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3_parser as parser from prometheus_client import CollectorRegistry from beka.route import RouteAddition, RouteRemoval from beka.ip import IPAddress, IPPrefix diff --git a/etc/systemd/system/gauge.service b/etc/systemd/system/gauge.service index 54eed3a723..1867694572 100644 --- a/etc/systemd/system/gauge.service +++ b/etc/systemd/system/gauge.service @@ -7,7 +7,7 @@ Wants=network-online.target EnvironmentFile=/etc/default/gauge User=faucet Group=faucet -ExecStart=/usr/local/bin/gauge --ryu-config-file=${GAUGE_RYU_CONF} --ryu-ofp-tcp-listen-port=${GAUGE_LISTEN_PORT} --ryu-wsapi-host=${WSAPI_LISTEN_HOST} --ryu-app=ryu.app.ofctl_rest +ExecStart=/usr/local/bin/gauge --ryu-config-file=${GAUGE_RYU_CONF} --ryu-ofp-tcp-listen-port=${GAUGE_LISTEN_PORT} Restart=always [Install] diff --git a/faucet/__main__.py b/faucet/__main__.py index ae435debdc..aa61b72350 100755 --- a/faucet/__main__.py +++ 
b/faucet/__main__.py @@ -57,8 +57,6 @@ ('ofp-tcp-listen-port', 'openflow tcp listen port (default: 6653)'), ('pid-file', 'pid file name'), ('user-flags', 'Additional flags file for user applications'), - ('wsapi-host', 'webapp listen host (default 0.0.0.0)'), - ('wsapi-port', 'webapp listen port (default 8080)') ] @@ -81,7 +79,7 @@ def parse_args(sys_args): args.add_argument( '--use-syslog', action='store_true', help='output to syslog') args.add_argument( - '--ryu-app', + '--ryu-app-lists', action='append', help='add Ryu app (can be specified multiple times)', metavar='APP') @@ -131,7 +129,7 @@ def build_ryu_args(argv): for arg, val in vars(args).items(): if not val or not arg.startswith('ryu'): continue - if arg == 'ryu_app': + if arg == 'ryu_app_lists': continue if arg == 'ryu_config_file' and not os.path.isfile(val): continue @@ -145,11 +143,11 @@ def build_ryu_args(argv): ryu_args.append('faucet.faucet') # Check for additional Ryu apps. - if args.ryu_app: - ryu_args.extend(args.ryu_app) + if args.ryu_app_lists: + ryu_args.extend(args.ryu_app_lists) # Replace current process with ryu-manager from PATH (no PID change). - ryu_args.insert(0, 'ryu-manager') + ryu_args.insert(0, 'osken-manager') return ryu_args diff --git a/faucet/acl.py b/faucet/acl.py index 14de18556a..a194178a6d 100644 --- a/faucet/acl.py +++ b/faucet/acl.py @@ -18,7 +18,7 @@ import copy import netaddr -from ryu.ofproto import ether +from os_ken.ofproto import ether from faucet import valve_of from faucet import valve_acl diff --git a/faucet/faucet.py b/faucet/faucet.py index 8a7ae17ad3..09fb7c3a47 100644 --- a/faucet/faucet.py +++ b/faucet/faucet.py @@ -1,4 +1,4 @@ -"""RyuApp shim between Ryu and Valve.""" +"""OSKenApp shim between Ryu and Valve.""" # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. 
@@ -27,15 +27,15 @@ from functools import partial -from ryu.controller.handler import CONFIG_DISPATCHER -from ryu.controller.handler import MAIN_DISPATCHER -from ryu.controller.handler import set_ev_cls -from ryu.controller import dpset -from ryu.controller import event -from ryu.controller import ofp_event -from ryu.lib import hub +from os_ken.controller.handler import CONFIG_DISPATCHER +from os_ken.controller.handler import MAIN_DISPATCHER +from os_ken.controller.handler import set_ev_cls +from os_ken.controller import dpset +from os_ken.controller import event +from os_ken.controller import ofp_event +from os_ken.lib import hub -from faucet.valve_ryuapp import EventReconfigure, RyuAppBase +from faucet.valve_ryuapp import EventReconfigure, OSKenAppBase from faucet.valve_util import dpid_log, kill_on_exception from faucet import faucet_event from faucet import faucet_bgp @@ -82,8 +82,8 @@ class EventFaucetEventSockHeartbeat(event.EventBase): # pylint: disable=too-few """ -class Faucet(RyuAppBase): - """A RyuApp that implements an L2/L3 learning VLAN switch. +class Faucet(OSKenAppBase): + """A OSKenApp that implements an L2/L3 learning VLAN switch. Valve provides the switch implementation; this is a shim for the Ryu event handling framework to interface with Valve. diff --git a/faucet/faucet_bgp.py b/faucet/faucet_bgp.py index 4e7fbaf32b..9d2b0b3318 100644 --- a/faucet/faucet_bgp.py +++ b/faucet/faucet_bgp.py @@ -20,7 +20,7 @@ import ipaddress -from ryu.lib import hub +from os_ken.lib import hub from beka.beka import Beka from faucet.valve_util import kill_on_exception diff --git a/faucet/faucet_dot1x.py b/faucet/faucet_dot1x.py index b774cdb43b..ff18e9f360 100644 --- a/faucet/faucet_dot1x.py +++ b/faucet/faucet_dot1x.py @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ryu.lib import hub +from os_ken.lib import hub from chewie import chewie from faucet.valve_util import kill_on_exception diff --git a/faucet/faucet_event.py b/faucet/faucet_event.py index 10bea94158..0072073782 100644 --- a/faucet/faucet_event.py +++ b/faucet/faucet_event.py @@ -25,8 +25,8 @@ import eventlet -from ryu.lib import hub -from ryu.lib.hub import StreamServer +from os_ken.lib import hub +from os_ken.lib.hub import StreamServer class NonBlockLock: diff --git a/faucet/gauge.py b/faucet/gauge.py index c215ad6b16..4c21250566 100644 --- a/faucet/gauge.py +++ b/faucet/gauge.py @@ -1,4 +1,4 @@ -"""RyuApp shim between Ryu and Gauge.""" +"""OSKenApp shim between Ryu and Gauge.""" # Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd. # Copyright (C) 2015--2019 The Contributors @@ -18,9 +18,9 @@ import time -from ryu.controller.handler import MAIN_DISPATCHER -from ryu.controller.handler import set_ev_cls -from ryu.controller import ofp_event +from os_ken.controller.handler import MAIN_DISPATCHER +from os_ken.controller.handler import set_ev_cls +from os_ken.controller import ofp_event from faucet import valve_of from faucet.conf import InvalidConfigError @@ -29,12 +29,12 @@ from faucet.gauge_prom import GaugePrometheusClient from faucet.valves_manager import ConfigWatcher from faucet.valve_of import ofp, parser -from faucet.valve_ryuapp import EventReconfigure, RyuAppBase +from faucet.valve_ryuapp import EventReconfigure, OSKenAppBase from faucet.valve_util import dpid_log, kill_on_exception from faucet.watcher import watcher_factory -class Gauge(RyuAppBase): +class Gauge(OSKenAppBase): """Ryu app for polling Faucet controlled datapaths for stats/state. It can poll multiple datapaths. 
The configuration files for each datapath diff --git a/faucet/gauge_pollers.py b/faucet/gauge_pollers.py index 0cb782dea6..a2f6eb3c8e 100644 --- a/faucet/gauge_pollers.py +++ b/faucet/gauge_pollers.py @@ -20,9 +20,9 @@ import random import time -from ryu.lib import hub -from ryu.ofproto import ofproto_v1_3 as ofp -from ryu.ofproto import ofproto_v1_3_parser as parser +from os_ken.lib import hub +from os_ken.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3_parser as parser from faucet.valve_of import devid_present from faucet.valve_of_old import OLD_MATCH_FIELDS diff --git a/faucet/prom_client.py b/faucet/prom_client.py index 240b10c68c..e4ba07ba2f 100644 --- a/faucet/prom_client.py +++ b/faucet/prom_client.py @@ -20,7 +20,7 @@ from urllib.parse import parse_qs -from ryu.lib import hub +from os_ken.lib import hub from pbr.version import VersionInfo from prometheus_client import Gauge as PromGauge from prometheus_client import generate_latest, CONTENT_TYPE_LATEST, REGISTRY diff --git a/faucet/valve_of.py b/faucet/valve_of.py index f2a0c27187..58eace2fe1 100644 --- a/faucet/valve_of.py +++ b/faucet/valve_of.py @@ -24,15 +24,15 @@ import random # pylint: disable=unused-import -from ryu.lib import mac # noqa: F401 -from ryu.lib import ofctl_v1_3 as ofctl -from ryu.lib.ofctl_utils import ( +from os_ken.lib import mac # noqa: F401 +from os_ken.lib import ofctl_v1_3 as ofctl +from os_ken.lib.ofctl_utils import ( str_to_int, to_match_ip, to_match_masked_int, to_match_eth, to_match_vid, OFCtlUtil) -from ryu.ofproto import ether +from os_ken.ofproto import ether # pylint: disable=unused-import -from ryu.ofproto import inet # noqa: F401 -from ryu.ofproto import ofproto_v1_3 as ofp -from ryu.ofproto import ofproto_v1_3_parser as parser +from os_ken.ofproto import inet # noqa: F401 +from os_ken.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3_parser as parser from faucet.conf import test_config_condition, InvalidConfigError 
from faucet.valve_of_old import OLD_MATCH_FIELDS diff --git a/faucet/valve_packet.py b/faucet/valve_packet.py index 853deda9b4..01aae4bc5b 100644 --- a/faucet/valve_packet.py +++ b/faucet/valve_packet.py @@ -23,13 +23,13 @@ import struct from netaddr import EUI -from ryu.lib import addrconv -from ryu.lib.mac import BROADCAST, DONTCARE, is_multicast, haddr_to_bin -from ryu.lib.packet import ( +from os_ken.lib import addrconv +from os_ken.lib.mac import BROADCAST, DONTCARE, is_multicast, haddr_to_bin +from os_ken.lib.packet import ( arp, bpdu, ethernet, icmp, icmpv6, ipv4, ipv6, lldp, slow, packet, vlan) -from ryu.lib.packet.stream_parser import StreamParser +from os_ken.lib.packet.stream_parser import StreamParser from faucet import valve_util from faucet import valve_of diff --git a/faucet/valve_route.py b/faucet/valve_route.py index a2cf648519..89509ac740 100644 --- a/faucet/valve_route.py +++ b/faucet/valve_route.py @@ -25,7 +25,7 @@ import ipaddress -from ryu.lib.packet import arp, icmp, icmpv6, ipv4, ipv6 +from os_ken.lib.packet import arp, icmp, icmpv6, ipv4, ipv6 from faucet import valve_of from faucet import valve_packet diff --git a/faucet/valve_ryuapp.py b/faucet/valve_ryuapp.py index deb3cd31a1..ddfdc9ecd2 100644 --- a/faucet/valve_ryuapp.py +++ b/faucet/valve_ryuapp.py @@ -1,4 +1,4 @@ -"""RyuApp base class for FAUCET/Gauge.""" +"""OSKenApp base class for FAUCET/Gauge.""" # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation. # Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. 
@@ -23,10 +23,10 @@ import signal import sys -from ryu.base import app_manager -from ryu.controller import dpset, event -from ryu.controller.handler import set_ev_cls -from ryu.lib import hub +from os_ken.base import app_manager +from os_ken.controller import dpset, event +from os_ken.controller.handler import set_ev_cls +from os_ken.lib import hub from faucet import valve_of from faucet.valve_util import dpid_log, get_logger, get_setting @@ -40,8 +40,8 @@ class EventReconfigure(event.EventBase): """Event sent to controller to cause config reload.""" -class RyuAppBase(app_manager.RyuApp): - """RyuApp base class for FAUCET/Gauge.""" +class OSKenAppBase(app_manager.OSKenApp): + """OSKenApp base class for FAUCET/Gauge.""" OFP_VERSIONS = valve_of.OFP_VERSIONS _CONTEXTS = { diff --git a/faucet/watcher.py b/faucet/watcher.py index d79ab26e66..a6a7cfe558 100644 --- a/faucet/watcher.py +++ b/faucet/watcher.py @@ -23,7 +23,7 @@ import gzip import time -from ryu.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3 as ofp from faucet.conf import InvalidConfigError from faucet.valve_util import dpid_log diff --git a/ofctl_rest/ofctl_rest.py b/ofctl_rest/ofctl_rest.py new file mode 100644 index 0000000000..cae83a369e --- /dev/null +++ b/ofctl_rest/ofctl_rest.py @@ -0,0 +1,753 @@ +# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import logging +import json +import ast + +from os_ken.base import app_manager +from os_ken.controller import ofp_event +from os_ken.controller import dpset +from os_ken.controller.handler import MAIN_DISPATCHER +from os_ken.controller.handler import set_ev_cls +from os_ken.exception import OSKenException +from os_ken.ofproto import ofproto_v1_3 +from os_ken.lib import ofctl_v1_3 +from os_ken.lib import hub +from wsgi import ControllerBase +from wsgi import Response +from wsgi import WSGIApplication, WSGIServer + +LOG = logging.getLogger('os_ken.app.ofctl_rest') + +DEFAULT_WSGI_HOST = '0.0.0.0' +DEFAULT_WSGI_PORT = 8080 + +OFCTL_HOST = os.getenv('OFCTL_HOST', '0.0.0.0') +OFCTL_PORT = int(os.getenv('OFCTL_PORT', '8080')) + +# supported ofctl versions in this restful app +supported_ofctl = { + ofproto_v1_3.OFP_VERSION: ofctl_v1_3, +} + +# REST API +# + +# Retrieve the switch stats +# +# get the list of all switches +# GET /stats/switches +# +# get the desc stats of the switch +# GET /stats/desc/ +# +# get flows desc stats of the switch +# GET /stats/flowdesc/ +# +# get flows desc stats of the switch filtered by the fields +# POST /stats/flowdesc/ +# +# get flows stats of the switch +# GET /stats/flow/ +# +# get flows stats of the switch filtered by the fields +# POST /stats/flow/ +# +# get aggregate flows stats of the switch +# GET /stats/aggregateflow/ +# +# get aggregate flows stats of the switch filtered by the fields +# POST /stats/aggregateflow/ +# +# get table stats of the switch +# GET /stats/table/ +# +# get table features stats of the switch +# GET /stats/tablefeatures/ +# +# get ports stats of the switch +# GET /stats/port/[/] +# Note: Specification of port number is optional +# +# get queues stats of the switch +# GET /stats/queue/[/[/]] +# Note: Specification of port number and queue id are optional +# If you want to omitting the port number and setting the queue id, +# please specify the keyword "ALL" to the port number +# e.g. 
GET /stats/queue/1/ALL/1 +# +# get queues config stats of the switch +# GET /stats/queueconfig/[/] +# Note: Specification of port number is optional +# +# get queues desc stats of the switch +# GET /stats/queuedesc/[/[/]] +# Note: Specification of port number and queue id are optional +# If you want to omitting the port number and setting the queue id, +# please specify the keyword "ALL" to the port number +# e.g. GET /stats/queuedesc/1/ALL/1 +# +# get meter features stats of the switch +# GET /stats/meterfeatures/ +# +# get meter config stats of the switch +# GET /stats/meterconfig/[/] +# Note: Specification of meter id is optional +# +# get meter desc stats of the switch +# GET /stats/meterdesc/[/] +# Note: Specification of meter id is optional +# +# get meters stats of the switch +# GET /stats/meter/[/] +# Note: Specification of meter id is optional +# +# get group features stats of the switch +# GET /stats/groupfeatures/ +# +# get groups desc stats of the switch +# GET /stats/groupdesc/[/] +# Note: Specification of group id is optional (OpenFlow 1.5 or later) +# +# get groups stats of the switch +# GET /stats/group/[/] +# Note: Specification of group id is optional +# +# get ports description of the switch +# GET /stats/portdesc/[/] +# Note: Specification of port number is optional (OpenFlow 1.5 or later) + +# Update the switch stats +# +# add a flow entry +# POST /stats/flowentry/add +# +# modify all matching flow entries +# POST /stats/flowentry/modify +# +# modify flow entry strictly matching wildcards and priority +# POST /stats/flowentry/modify_strict +# +# delete all matching flow entries +# POST /stats/flowentry/delete +# +# delete flow entry strictly matching wildcards and priority +# POST /stats/flowentry/delete_strict +# +# delete all flow entries of the switch +# DELETE /stats/flowentry/clear/ +# +# add a meter entry +# POST /stats/meterentry/add +# +# modify a meter entry +# POST /stats/meterentry/modify +# +# delete a meter entry +# POST 
/stats/meterentry/delete +# +# add a group entry +# POST /stats/groupentry/add +# +# modify a group entry +# POST /stats/groupentry/modify +# +# delete a group entry +# POST /stats/groupentry/delete +# +# modify behavior of the physical port +# POST /stats/portdesc/modify +# +# modify role of controller +# POST /stats/role +# +# +# send a experimeter message +# POST /stats/experimenter/ + + +class CommandNotFoundError(OSKenException): + message = 'No such command : %(cmd)s' + + +class PortNotFoundError(OSKenException): + message = 'No such port info: %(port_no)s' + + +def stats_method(method): + def wrapper(self, req, dpid, *args, **kwargs): + # Get datapath instance from DPSet + try: + dp = self.dpset.get(int(str(dpid), 0)) + except ValueError: + LOG.exception('Invalid dpid: %s', dpid) + return Response(status=400) + if dp is None: + LOG.error('No such Datapath: %s', dpid) + return Response(status=404) + + # Get lib/ofctl_* module + try: + ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION) + except KeyError: + LOG.exception('Unsupported OF version: %s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + + # Invoke StatsController method + try: + ret = method(self, req, dp, ofctl, *args, **kwargs) + return Response(content_type='application/json', + body=json.dumps(ret)) + except ValueError: + LOG.exception('Invalid syntax: %s', req.body) + return Response(status=400) + except AttributeError: + LOG.exception('Unsupported OF request in this version: %s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + + return wrapper + + +def command_method(method): + def wrapper(self, req, *args, **kwargs): + # Parse request json body + try: + if req.body: + # We use ast.literal_eval() to parse request json body + # instead of json.loads(). + # Because we need to parse binary format body + # in send_experimenter(). 
+ body = ast.literal_eval(req.body.decode('utf-8')) + else: + body = {} + except SyntaxError: + LOG.exception('Invalid syntax: %s', req.body) + return Response(status=400) + + # Get datapath_id from request parameters + dpid = body.get('dpid', None) + if not dpid: + try: + dpid = kwargs.pop('dpid') + except KeyError: + LOG.exception('Cannot get dpid from request parameters') + return Response(status=400) + + # Get datapath instance from DPSet + try: + dp = self.dpset.get(int(str(dpid), 0)) + except ValueError: + LOG.exception('Invalid dpid: %s', dpid) + return Response(status=400) + if dp is None: + LOG.error('No such Datapath: %s', dpid) + return Response(status=404) + + # Get lib/ofctl_* module + try: + ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION) + except KeyError: + LOG.exception('Unsupported OF version: version=%s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + + # Invoke StatsController method + try: + method(self, req, dp, ofctl, body, *args, **kwargs) + return Response(status=200) + except ValueError: + LOG.exception('Invalid syntax: %s', req.body) + return Response(status=400) + except AttributeError: + LOG.exception('Unsupported OF request in this version: %s', + dp.ofproto.OFP_VERSION) + return Response(status=501) + except CommandNotFoundError as e: + LOG.exception(e.message) + return Response(status=404) + except PortNotFoundError as e: + LOG.exception(e.message) + return Response(status=404) + + return wrapper + + +class StatsController(ControllerBase): + def __init__(self, req, link, data, **config): + super(StatsController, self).__init__(req, link, data, **config) + self.dpset = data['dpset'] + self.waiters = data['waiters'] + + def get_dpids(self, req, **_kwargs): + dps = list(self.dpset.dps.keys()) + body = json.dumps(dps) + return Response(content_type='application/json', body=body) + + @stats_method + def get_desc_stats(self, req, dp, ofctl, **kwargs): + return ofctl.get_desc_stats(dp, self.waiters) + + @stats_method + def 
get_flow_desc(self, req, dp, ofctl, **kwargs): + flow = req.json if req.body else {} + return ofctl.get_flow_desc(dp, self.waiters, flow) + + @stats_method + def get_flow_stats(self, req, dp, ofctl, **kwargs): + flow = req.json if req.body else {} + return ofctl.get_flow_stats(dp, self.waiters, flow) + + @stats_method + def get_aggregate_flow_stats(self, req, dp, ofctl, **kwargs): + flow = req.json if req.body else {} + return ofctl.get_aggregate_flow_stats(dp, self.waiters, flow) + + @stats_method + def get_table_stats(self, req, dp, ofctl, **kwargs): + return ofctl.get_table_stats(dp, self.waiters) + + @stats_method + def get_table_features(self, req, dp, ofctl, **kwargs): + return ofctl.get_table_features(dp, self.waiters) + + @stats_method + def get_port_stats(self, req, dp, ofctl, port=None, **kwargs): + if port == "ALL": + port = None + + return ofctl.get_port_stats(dp, self.waiters, port) + + @stats_method + def get_queue_stats(self, req, dp, ofctl, + port=None, queue_id=None, **kwargs): + if port == "ALL": + port = None + + if queue_id == "ALL": + queue_id = None + + return ofctl.get_queue_stats(dp, self.waiters, port, queue_id) + + @stats_method + def get_queue_config(self, req, dp, ofctl, port=None, **kwargs): + if port == "ALL": + port = None + + return ofctl.get_queue_config(dp, self.waiters, port) + + @stats_method + def get_queue_desc(self, req, dp, ofctl, + port=None, queue=None, **_kwargs): + if port == "ALL": + port = None + + if queue == "ALL": + queue = None + + return ofctl.get_queue_desc(dp, self.waiters, port, queue) + + @stats_method + def get_meter_features(self, req, dp, ofctl, **kwargs): + return ofctl.get_meter_features(dp, self.waiters) + + @stats_method + def get_meter_config(self, req, dp, ofctl, meter_id=None, **kwargs): + if meter_id == "ALL": + meter_id = None + + return ofctl.get_meter_config(dp, self.waiters, meter_id) + + @stats_method + def get_meter_desc(self, req, dp, ofctl, meter_id=None, **kwargs): + if meter_id == "ALL": + 
meter_id = None + + return ofctl.get_meter_desc(dp, self.waiters, meter_id) + + @stats_method + def get_meter_stats(self, req, dp, ofctl, meter_id=None, **kwargs): + if meter_id == "ALL": + meter_id = None + + return ofctl.get_meter_stats(dp, self.waiters, meter_id) + + @stats_method + def get_group_features(self, req, dp, ofctl, **kwargs): + return ofctl.get_group_features(dp, self.waiters) + + @stats_method + def get_group_desc(self, req, dp, ofctl, group_id=None, **kwargs): + return ofctl.get_group_desc(dp, self.waiters) + + @stats_method + def get_group_stats(self, req, dp, ofctl, group_id=None, **kwargs): + if group_id == "ALL": + group_id = None + + return ofctl.get_group_stats(dp, self.waiters, group_id) + + @stats_method + def get_port_desc(self, req, dp, ofctl, port_no=None, **kwargs): + return ofctl.get_port_desc(dp, self.waiters) + + @stats_method + def get_role(self, req, dp, ofctl, **kwargs): + return ofctl.get_role(dp, self.waiters) + + @command_method + def mod_flow_entry(self, req, dp, ofctl, flow, cmd, **kwargs): + cmd_convert = { + 'add': dp.ofproto.OFPFC_ADD, + 'modify': dp.ofproto.OFPFC_MODIFY, + 'modify_strict': dp.ofproto.OFPFC_MODIFY_STRICT, + 'delete': dp.ofproto.OFPFC_DELETE, + 'delete_strict': dp.ofproto.OFPFC_DELETE_STRICT, + } + mod_cmd = cmd_convert.get(cmd, None) + if mod_cmd is None: + raise CommandNotFoundError(cmd=cmd) + + ofctl.mod_flow_entry(dp, flow, mod_cmd) + + @command_method + def delete_flow_entry(self, req, dp, ofctl, flow, **kwargs): + flow = {'table_id': dp.ofproto.OFPTT_ALL} + ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE) + + @command_method + def mod_meter_entry(self, req, dp, ofctl, meter, cmd, **kwargs): + cmd_convert = { + 'add': dp.ofproto.OFPMC_ADD, + 'modify': dp.ofproto.OFPMC_MODIFY, + 'delete': dp.ofproto.OFPMC_DELETE, + } + mod_cmd = cmd_convert.get(cmd, None) + if mod_cmd is None: + raise CommandNotFoundError(cmd=cmd) + + ofctl.mod_meter_entry(dp, meter, mod_cmd) + + @command_method + def 
mod_group_entry(self, req, dp, ofctl, group, cmd, **kwargs): + cmd_convert = { + 'add': dp.ofproto.OFPGC_ADD, + 'modify': dp.ofproto.OFPGC_MODIFY, + 'delete': dp.ofproto.OFPGC_DELETE, + } + mod_cmd = cmd_convert.get(cmd, None) + if mod_cmd is None: + raise CommandNotFoundError(cmd=cmd) + + ofctl.mod_group_entry(dp, group, mod_cmd) + + @command_method + def mod_port_behavior(self, req, dp, ofctl, port_config, cmd, **kwargs): + port_no = port_config.get('port_no', None) + port_no = int(str(port_no), 0) + + port_info = self.dpset.port_state[int(dp.id)].get(port_no) + if port_info: + port_config.setdefault('hw_addr', port_info.hw_addr) + port_config.setdefault('advertise', port_info.advertised) + else: + raise PortNotFoundError(port_no=port_no) + + if cmd != 'modify': + raise CommandNotFoundError(cmd=cmd) + + ofctl.mod_port_behavior(dp, port_config) + + @command_method + def send_experimenter(self, req, dp, ofctl, exp, **kwargs): + ofctl.send_experimenter(dp, exp) + + @command_method + def set_role(self, req, dp, ofctl, role, **kwargs): + ofctl.set_role(dp, role) + + +class RestStatsApi(app_manager.OSKenApp): + OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] + _CONTEXTS = { + 'dpset': dpset.DPSet, + 'wsgi': WSGIApplication + } + + def __init__(self, *args, **kwargs): + super(RestStatsApi, self).__init__(*args, **kwargs) + self.dpset = kwargs['dpset'] + wsgi = kwargs['wsgi'] + self.waiters = {} + self.data = {} + self.data['dpset'] = self.dpset + self.data['waiters'] = self.waiters + mapper = wsgi.mapper + + wsgi.registory['StatsController'] = self.data + path = '/stats' + uri = path + '/switches' + mapper.connect('stats', uri, + controller=StatsController, action='get_dpids', + conditions=dict(method=['GET'])) + + uri = path + '/desc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_desc_stats', + conditions=dict(method=['GET'])) + + uri = path + '/flowdesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, 
action='get_flow_stats', + conditions=dict(method=['GET', 'POST'])) + + uri = path + '/flow/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_flow_stats', + conditions=dict(method=['GET', 'POST'])) + + uri = path + '/aggregateflow/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, + action='get_aggregate_flow_stats', + conditions=dict(method=['GET', 'POST'])) + + uri = path + '/table/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_table_stats', + conditions=dict(method=['GET'])) + + uri = path + '/tablefeatures/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_table_features', + conditions=dict(method=['GET'])) + + uri = path + '/port/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_port_stats', + conditions=dict(method=['GET'])) + + uri = path + '/port/{dpid}/{port}' + mapper.connect('stats', uri, + controller=StatsController, action='get_port_stats', + conditions=dict(method=['GET'])) + + uri = path + '/queue/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_stats', + conditions=dict(method=['GET'])) + + uri = path + '/queue/{dpid}/{port}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_stats', + conditions=dict(method=['GET'])) + + uri = path + '/queue/{dpid}/{port}/{queue_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_stats', + conditions=dict(method=['GET'])) + uri = path + '/queueconfig/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_config', + conditions=dict(method=['GET'])) + + uri = path + '/queueconfig/{dpid}/{port}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_config', + conditions=dict(method=['GET'])) + + uri = path + '/queuedesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_desc', + 
conditions=dict(method=['GET'])) + + uri = path + '/queuedesc/{dpid}/{port}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_desc', + conditions=dict(method=['GET'])) + + uri = path + '/queuedesc/{dpid}/{port}/{queue}' + mapper.connect('stats', uri, + controller=StatsController, action='get_queue_desc', + conditions=dict(method=['GET'])) + + uri = path + '/meterfeatures/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_features', + conditions=dict(method=['GET'])) + + uri = path + '/meterconfig/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_config', + conditions=dict(method=['GET'])) + + uri = path + '/meterconfig/{dpid}/{meter_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_config', + conditions=dict(method=['GET'])) + + uri = path + '/meterdesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_desc', + conditions=dict(method=['GET'])) + + uri = path + '/meterdesc/{dpid}/{meter_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_desc', + conditions=dict(method=['GET'])) + + uri = path + '/meter/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_stats', + conditions=dict(method=['GET'])) + + uri = path + '/meter/{dpid}/{meter_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_meter_stats', + conditions=dict(method=['GET'])) + + uri = path + '/groupfeatures/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_group_features', + conditions=dict(method=['GET'])) + + uri = path + '/groupdesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_group_desc', + conditions=dict(method=['GET'])) + + uri = path + '/groupdesc/{dpid}/{group_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_group_desc', + 
conditions=dict(method=['GET'])) + + uri = path + '/group/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_group_stats', + conditions=dict(method=['GET'])) + + uri = path + '/group/{dpid}/{group_id}' + mapper.connect('stats', uri, + controller=StatsController, action='get_group_stats', + conditions=dict(method=['GET'])) + + uri = path + '/portdesc/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_port_desc', + conditions=dict(method=['GET'])) + + uri = path + '/portdesc/{dpid}/{port_no}' + mapper.connect('stats', uri, + controller=StatsController, action='get_port_desc', + conditions=dict(method=['GET'])) + + uri = path + '/role/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='get_role', + conditions=dict(method=['GET'])) + + uri = path + '/flowentry/{cmd}' + mapper.connect('stats', uri, + controller=StatsController, action='mod_flow_entry', + conditions=dict(method=['POST'])) + + uri = path + '/flowentry/clear/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='delete_flow_entry', + conditions=dict(method=['DELETE'])) + + uri = path + '/meterentry/{cmd}' + mapper.connect('stats', uri, + controller=StatsController, action='mod_meter_entry', + conditions=dict(method=['POST'])) + + uri = path + '/groupentry/{cmd}' + mapper.connect('stats', uri, + controller=StatsController, action='mod_group_entry', + conditions=dict(method=['POST'])) + + uri = path + '/portdesc/{cmd}' + mapper.connect('stats', uri, + controller=StatsController, action='mod_port_behavior', + conditions=dict(method=['POST'])) + + uri = path + '/experimenter/{dpid}' + mapper.connect('stats', uri, + controller=StatsController, action='send_experimenter', + conditions=dict(method=['POST'])) + + uri = path + '/role' + mapper.connect('stats', uri, + controller=StatsController, action='set_role', + conditions=dict(method=['POST'])) + + self.server = WSGIServer(wsgi, OFCTL_HOST, OFCTL_PORT) + 
self.server_thread = hub.spawn(self.server.serve_forever) + + @set_ev_cls([ofp_event.EventOFPStatsReply, + ofp_event.EventOFPDescStatsReply, + ofp_event.EventOFPFlowStatsReply, + ofp_event.EventOFPAggregateStatsReply, + ofp_event.EventOFPTableStatsReply, + ofp_event.EventOFPTableFeaturesStatsReply, + ofp_event.EventOFPPortStatsReply, + ofp_event.EventOFPQueueStatsReply, + ofp_event.EventOFPQueueDescStatsReply, + ofp_event.EventOFPMeterStatsReply, + ofp_event.EventOFPMeterFeaturesStatsReply, + ofp_event.EventOFPMeterConfigStatsReply, + ofp_event.EventOFPGroupStatsReply, + ofp_event.EventOFPGroupFeaturesStatsReply, + ofp_event.EventOFPGroupDescStatsReply, + ofp_event.EventOFPPortDescStatsReply + ], MAIN_DISPATCHER) + def stats_reply_handler(self, ev): + msg = ev.msg + dp = msg.datapath + + if dp.id not in self.waiters: + return + if msg.xid not in self.waiters[dp.id]: + return + lock, msgs = self.waiters[dp.id][msg.xid] + msgs.append(msg) + + flags = dp.ofproto.OFPMPF_REPLY_MORE + + if msg.flags & flags: + return + del self.waiters[dp.id][msg.xid] + lock.set() + + @set_ev_cls([ofp_event.EventOFPSwitchFeatures, + ofp_event.EventOFPQueueGetConfigReply, + ofp_event.EventOFPRoleReply, + ], MAIN_DISPATCHER) + def features_reply_handler(self, ev): + msg = ev.msg + dp = msg.datapath + + if dp.id not in self.waiters: + return + if msg.xid not in self.waiters[dp.id]: + return + lock, msgs = self.waiters[dp.id][msg.xid] + msgs.append(msg) + + del self.waiters[dp.id][msg.xid] + lock.set() diff --git a/ofctl_rest/wsgi.py b/ofctl_rest/wsgi.py new file mode 100644 index 0000000000..29fc1c6664 --- /dev/null +++ b/ofctl_rest/wsgi.py @@ -0,0 +1,320 @@ +# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. +# Copyright (C) 2012 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from types import MethodType + +from routes import Mapper +from routes.util import URLGenerator +import six +from tinyrpc.server import RPCServer +from tinyrpc.dispatch import RPCDispatcher +from tinyrpc.dispatch import public as rpc_public +from tinyrpc.protocols.jsonrpc import JSONRPCProtocol +from tinyrpc.transports import ServerTransport, ClientTransport +from tinyrpc.client import RPCClient +import webob.dec +import webob.exc +from webob.request import Request as webob_Request +from webob.response import Response as webob_Response + +from os_ken import cfg +from os_ken.lib import hub + +HEX_PATTERN = r'0x[0-9a-z]+' +DIGIT_PATTERN = r'[1-9][0-9]*' + + +def route(name, path, methods=None, requirements=None): + def _route(controller_method): + controller_method.routing_info = { + 'name': name, + 'path': path, + 'methods': methods, + 'requirements': requirements, + } + return controller_method + return _route + + +class Request(webob_Request): + """ + Wrapper class for webob.request.Request. + + The behavior of this class is the same as webob.request.Request + except for setting "charset" to "UTF-8" automatically. + """ + DEFAULT_CHARSET = "UTF-8" + + def __init__(self, environ, charset=DEFAULT_CHARSET, *args, **kwargs): + super(Request, self).__init__( + environ, charset=charset, *args, **kwargs) + + +class Response(webob_Response): + """ + Wrapper class for webob.response.Response. + + The behavior of this class is the same as webob.response.Response + except for setting "charset" to "UTF-8" automatically. 
+ """ + DEFAULT_CHARSET = "UTF-8" + + def __init__(self, charset=DEFAULT_CHARSET, *args, **kwargs): + super(Response, self).__init__(charset=charset, *args, **kwargs) + + +class WebSocketRegistrationWrapper(object): + + def __init__(self, func, controller): + self._controller = controller + self._controller_method = MethodType(func, controller) + + def __call__(self, ws): + wsgi_application = self._controller.parent + ws_manager = wsgi_application.websocketmanager + ws_manager.add_connection(ws) + try: + self._controller_method(ws) + finally: + ws_manager.delete_connection(ws) + + +class _AlreadyHandledResponse(Response): + # XXX: Eventlet API should not be used directly. + # https://github.com/benoitc/gunicorn/pull/2581 + from packaging import version + import eventlet + if version.parse(eventlet.__version__) >= version.parse("0.30.3"): + import eventlet.wsgi + _ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None) + else: + from eventlet.wsgi import ALREADY_HANDLED + _ALREADY_HANDLED = ALREADY_HANDLED + + def __call__(self, environ, start_response): + return self._ALREADY_HANDLED + +def websocket(name, path): + def _websocket(controller_func): + def __websocket(self, req, **_): + wrapper = WebSocketRegistrationWrapper(controller_func, self) + ws_wsgi = hub.WebSocketWSGI(wrapper) + ws_wsgi(req.environ, req.start_response) + # XXX: In order to prevent the writing to a already closed socket. 
+ # This issue is caused by combined use: + # - webob.dec.wsgify() + # - eventlet.wsgi.HttpProtocol.handle_one_response() + return _AlreadyHandledResponse() + __websocket.routing_info = { + 'name': name, + 'path': path, + 'methods': None, + 'requirements': None, + } + return __websocket + return _websocket + + +class ControllerBase(object): + special_vars = ['action', 'controller'] + + def __init__(self, req, link, data, **config): + self.req = req + self.link = link + self.data = data + self.parent = None + for name, value in config.items(): + setattr(self, name, value) + + def __call__(self, req): + action = self.req.urlvars.get('action', 'index') + if hasattr(self, '__before__'): + self.__before__() + + kwargs = self.req.urlvars.copy() + for attr in self.special_vars: + if attr in kwargs: + del kwargs[attr] + + return getattr(self, action)(req, **kwargs) + + +class WebSocketDisconnectedError(Exception): + pass + + +class WebSocketServerTransport(ServerTransport): + def __init__(self, ws): + self.ws = ws + + def receive_message(self): + message = self.ws.wait() + if message is None: + raise WebSocketDisconnectedError() + context = None + return context, message + + def send_reply(self, context, reply): + self.ws.send(six.text_type(reply)) + + +class WebSocketRPCServer(RPCServer): + def __init__(self, ws, rpc_callback): + dispatcher = RPCDispatcher() + dispatcher.register_instance(rpc_callback) + super(WebSocketRPCServer, self).__init__( + WebSocketServerTransport(ws), + JSONRPCProtocol(), + dispatcher, + ) + + def serve_forever(self): + try: + super(WebSocketRPCServer, self).serve_forever() + except WebSocketDisconnectedError: + return + + def _spawn(self, func, *args, **kwargs): + hub.spawn(func, *args, **kwargs) + + +class WebSocketClientTransport(ClientTransport): + + def __init__(self, ws, queue): + self.ws = ws + self.queue = queue + + def send_message(self, message, expect_reply=True): + self.ws.send(six.text_type(message)) + + if expect_reply: + return 
self.queue.get() + + +class WebSocketRPCClient(RPCClient): + + def __init__(self, ws): + self.ws = ws + self.queue = hub.Queue() + super(WebSocketRPCClient, self).__init__( + JSONRPCProtocol(), + WebSocketClientTransport(ws, self.queue), + ) + + def serve_forever(self): + while True: + msg = self.ws.wait() + if msg is None: + break + self.queue.put(msg) + + +class wsgify_hack(webob.dec.wsgify): + def __call__(self, environ, start_response): + self.kwargs['start_response'] = start_response + return super(wsgify_hack, self).__call__(environ, start_response) + + +class WebSocketManager(object): + + def __init__(self): + self._connections = [] + + def add_connection(self, ws): + self._connections.append(ws) + + def delete_connection(self, ws): + self._connections.remove(ws) + + def broadcast(self, msg): + for connection in self._connections: + connection.send(msg) + + +class WSGIApplication(object): + def __init__(self, **config): + self.config = config + self.mapper = Mapper() + self.registory = {} + self._wsmanager = WebSocketManager() + super(WSGIApplication, self).__init__() + + def _match(self, req): + # Note: Invoke the new API, first. If the arguments unmatched, + # invoke the old API. 
+ try: + return self.mapper.match(environ=req.environ) + except TypeError: + self.mapper.environ = req.environ + return self.mapper.match(req.path_info) + + @wsgify_hack + def __call__(self, req, start_response): + match = self._match(req) + + if not match: + return webob.exc.HTTPNotFound() + + req.start_response = start_response + req.urlvars = match + link = URLGenerator(self.mapper, req.environ) + + data = None + name = match['controller'].__name__ + if name in self.registory: + data = self.registory[name] + + controller = match['controller'](req, link, data, **self.config) + controller.parent = self + return controller(req) + + def register(self, controller, data=None): + def _target_filter(attr): + if not inspect.ismethod(attr) and not inspect.isfunction(attr): + return False + if not hasattr(attr, 'routing_info'): + return False + return True + methods = inspect.getmembers(controller, _target_filter) + for method_name, method in methods: + routing_info = getattr(method, 'routing_info') + name = routing_info['name'] + path = routing_info['path'] + conditions = {} + if routing_info.get('methods'): + conditions['method'] = routing_info['methods'] + requirements = routing_info.get('requirements') or {} + self.mapper.connect(name, + path, + controller=controller, + requirements=requirements, + action=method_name, + conditions=conditions) + if data: + self.registory[controller.__name__] = data + + @property + def websocketmanager(self): + return self._wsmanager + + +class WSGIServer(hub.WSGIServer): + def __init__(self, application, host, port, **config): + super(WSGIServer, self).__init__((host, port), application, **config) + + def __call__(self): + self.serve_forever() diff --git a/requirements.txt b/requirements.txt index d87e462ef3..64096ed209 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ c65beka==1.0.0 c65chewie==1.0.2 -c65ryu==5.0.0 +os_ken==2.1.0 influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 diff --git a/test-requirements.txt 
b/test-requirements.txt index 6ddae490fc..eb0319508a 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,3 +10,5 @@ packaging requests requirements-parser scapy==2.4.4 +webob +tinyrpc diff --git a/tests/generative/fuzzer/packet/display_packet_crash.py b/tests/generative/fuzzer/packet/display_packet_crash.py index 0a394af840..fed6507502 100644 --- a/tests/generative/fuzzer/packet/display_packet_crash.py +++ b/tests/generative/fuzzer/packet/display_packet_crash.py @@ -4,7 +4,7 @@ import sys -from ryu.controller import dpset +from os_ken.controller import dpset from faucet import faucet import fake_packet diff --git a/tests/generative/fuzzer/packet/fuzz_packet.py b/tests/generative/fuzzer/packet/fuzz_packet.py index 40f00854d6..d5530bff1d 100644 --- a/tests/generative/fuzzer/packet/fuzz_packet.py +++ b/tests/generative/fuzzer/packet/fuzz_packet.py @@ -6,7 +6,7 @@ import sys import afl -from ryu.controller import dpset +from os_ken.controller import dpset from faucet import faucet import fake_packet diff --git a/tests/generative/unit/test_topology.py b/tests/generative/unit/test_topology.py index f5f656a7de..d95a1e6849 100755 --- a/tests/generative/unit/test_topology.py +++ b/tests/generative/unit/test_topology.py @@ -22,8 +22,8 @@ import unittest import yaml -from ryu.lib import mac -from ryu.ofproto import ofproto_v1_3 as ofp +from os_ken.lib import mac +from os_ken.ofproto import ofproto_v1_3 as ofp import networkx from networkx.generators.atlas import graph_atlas_g diff --git a/tests/unit/faucet/test_valve.py b/tests/unit/faucet/test_valve.py index 1fcd44af63..a77e050ad5 100644 --- a/tests/unit/faucet/test_valve.py +++ b/tests/unit/faucet/test_valve.py @@ -24,11 +24,11 @@ import unittest import yaml -from ryu.lib import mac -from ryu.lib.packet import slow -from ryu.ofproto import ether -from ryu.ofproto import ofproto_v1_3 as ofp -from ryu.ofproto import ofproto_v1_3_parser as parser +from os_ken.lib import mac +from os_ken.lib.packet import slow 
+from os_ken.ofproto import ether +from os_ken.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3_parser as parser from faucet import valve_of from faucet import valve_packet diff --git a/tests/unit/faucet/test_valve_config.py b/tests/unit/faucet/test_valve_config.py index 5c640413c2..3a72490a61 100755 --- a/tests/unit/faucet/test_valve_config.py +++ b/tests/unit/faucet/test_valve_config.py @@ -26,7 +26,7 @@ import unittest import time -from ryu.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3 as ofp from faucet import config_parser_util from faucet import valve_of diff --git a/tests/unit/faucet/test_valve_egress.py b/tests/unit/faucet/test_valve_egress.py index 47fd5adc33..61f28c2095 100755 --- a/tests/unit/faucet/test_valve_egress.py +++ b/tests/unit/faucet/test_valve_egress.py @@ -20,7 +20,7 @@ import unittest -from ryu.ofproto import ofproto_v1_3 as ofp +from os_ken.ofproto import ofproto_v1_3 as ofp from clib.valve_test_lib import CONFIG, DP1_CONFIG, FAUCET_MAC, ValveTestBases diff --git a/tests/unit/faucet/test_valve_stack.py b/tests/unit/faucet/test_valve_stack.py index fd533fe166..6ee37d390f 100644 --- a/tests/unit/faucet/test_valve_stack.py +++ b/tests/unit/faucet/test_valve_stack.py @@ -25,8 +25,8 @@ import ipaddress import yaml -from ryu.lib import mac -from ryu.ofproto import ofproto_v1_3 as ofp +from os_ken.lib import mac +from os_ken.ofproto import ofproto_v1_3 as ofp from faucet import valve_of from faucet.port import ( diff --git a/tests/unit/faucet/test_valveapp_smoke.py b/tests/unit/faucet/test_valveapp_smoke.py index 5bcdb2ba55..60a38b6675 100755 --- a/tests/unit/faucet/test_valveapp_smoke.py +++ b/tests/unit/faucet/test_valveapp_smoke.py @@ -24,12 +24,12 @@ import os import unittest from prometheus_client import CollectorRegistry -from ryu.controller import dpset -from ryu.controller.ofp_event import EventOFPMsgBase +from os_ken.controller import dpset +from os_ken.controller.ofp_event import 
EventOFPMsgBase from faucet import faucet -class RyuAppSmokeTest(unittest.TestCase): # pytype: disable=module-attr +class OSKenAppSmokeTest(unittest.TestCase): # pytype: disable=module-attr """Test bare instantiation of controller classes.""" @staticmethod @@ -42,33 +42,33 @@ def test_faucet(self): os.environ['FAUCET_CONFIG'] = '/dev/null' os.environ['FAUCET_LOG'] = '/dev/null' os.environ['FAUCET_EXCEPTION_LOG'] = '/dev/null' - ryu_app = faucet.Faucet( + os_ken_app = faucet.Faucet( dpset={}, reg=CollectorRegistry()) - ryu_app.reload_config(None) - self.assertFalse(ryu_app._config_files_changed()) - ryu_app.metric_update(None) + os_ken_app.reload_config(None) + self.assertFalse(os_ken_app._config_files_changed()) + os_ken_app.metric_update(None) event_dp = dpset.EventDPReconnected(dp=self._fake_dp()) for enter in (True, False): event_dp.enter = enter - ryu_app.connect_or_disconnect_handler(event_dp) + os_ken_app.connect_or_disconnect_handler(event_dp) for event_handler in ( - ryu_app.error_handler, - ryu_app.features_handler, - ryu_app.packet_in_handler, - ryu_app.desc_stats_reply_handler, - ryu_app.port_desc_stats_reply_handler, - ryu_app.port_status_handler, - ryu_app.flowremoved_handler, - ryu_app.reconnect_handler, - ryu_app._datapath_connect, - ryu_app._datapath_disconnect): + os_ken_app.error_handler, + os_ken_app.features_handler, + os_ken_app.packet_in_handler, + os_ken_app.desc_stats_reply_handler, + os_ken_app.port_desc_stats_reply_handler, + os_ken_app.port_status_handler, + os_ken_app.flowremoved_handler, + os_ken_app.reconnect_handler, + os_ken_app._datapath_connect, + os_ken_app._datapath_disconnect): msg = namedtuple('msg', ['datapath'])(self._fake_dp()) event = EventOFPMsgBase(msg=msg) event.dp = msg.datapath event_handler(event) - ryu_app._check_thread_exception() - ryu_app._thread_jitter(1) + os_ken_app._check_thread_exception() + os_ken_app._thread_jitter(1) if __name__ == "__main__": diff --git a/tests/unit/gauge/test_gauge.py 
b/tests/unit/gauge/test_gauge.py index c939a99561..dcc984225d 100755 --- a/tests/unit/gauge/test_gauge.py +++ b/tests/unit/gauge/test_gauge.py @@ -21,11 +21,11 @@ import requests from requests.exceptions import ReadTimeout -from ryu.controller.ofp_event import EventOFPMsgBase -from ryu.lib import type_desc -from ryu.lib import hub -from ryu.ofproto import ofproto_v1_3 as ofproto -from ryu.ofproto import ofproto_v1_3_parser as parser +from os_ken.controller.ofp_event import EventOFPMsgBase +from os_ken.lib import type_desc +from os_ken.lib import hub +from os_ken.ofproto import ofproto_v1_3 as ofproto +from os_ken.ofproto import ofproto_v1_3_parser as parser from prometheus_client import CollectorRegistry @@ -890,18 +890,18 @@ def test_flow_stats(self): compare_flow_msg(msg, yaml_dict, self) -class RyuAppSmokeTest(unittest.TestCase): # pytype: disable=module-attr +class OSKenAppSmokeTest(unittest.TestCase): # pytype: disable=module-attr """Test Gauge Ryu app.""" def setUp(self): self.tmpdir = tempfile.mkdtemp() os.environ['GAUGE_LOG'] = os.path.join(self.tmpdir, 'gauge.log') os.environ['GAUGE_EXCEPTION_LOG'] = os.path.join(self.tmpdir, 'gauge-exception.log') - self.ryu_app = None + self.os_ken_app = None def tearDown(self): - valve_util.close_logger(self.ryu_app.logger) - valve_util.close_logger(self.ryu_app.exc_logger) + valve_util.close_logger(self.os_ken_app.logger) + valve_util.close_logger(self.os_ken_app.exc_logger) shutil.rmtree(self.tmpdir) @staticmethod @@ -924,16 +924,16 @@ def _write_config(config_file_name, config): def test_gauge(self): """Test Gauge can be initialized.""" os.environ['GAUGE_CONFIG'] = '/dev/null' - self.ryu_app = gauge.Gauge( + self.os_ken_app = gauge.Gauge( dpset={}, reg=CollectorRegistry()) - self.ryu_app.reload_config(None) - self.assertFalse(self.ryu_app._config_files_changed()) - self.ryu_app._update_watcher(None, self._fake_event()) - self.ryu_app._start_watchers(self._fake_dp(), {}, time.time()) + 
self.os_ken_app.reload_config(None) + self.assertFalse(self.os_ken_app._config_files_changed()) + self.os_ken_app._update_watcher(None, self._fake_event()) + self.os_ken_app._start_watchers(self._fake_dp(), {}, time.time()) for event_handler in ( - self.ryu_app._datapath_connect, - self.ryu_app._datapath_disconnect): + self.os_ken_app._datapath_connect, + self.os_ken_app._datapath_disconnect): event_handler(self._fake_event()) def test_gauge_config(self): @@ -990,34 +990,34 @@ def test_gauge_config(self): prometheus_port: 0 """ % os.environ['FAUCET_CONFIG'] self._write_config(os.environ['GAUGE_CONFIG'], gauge_conf) - self.ryu_app = gauge.Gauge( + self.os_ken_app = gauge.Gauge( dpset={}, reg=CollectorRegistry()) - self.ryu_app.reload_config(None) - self.assertFalse(self.ryu_app._config_files_changed()) - self.assertTrue(self.ryu_app.watchers) - self.ryu_app.reload_config(None) - self.assertTrue(self.ryu_app.watchers) - self.assertFalse(self.ryu_app._config_files_changed()) + self.os_ken_app.reload_config(None) + self.assertFalse(self.os_ken_app._config_files_changed()) + self.assertTrue(self.os_ken_app.watchers) + self.os_ken_app.reload_config(None) + self.assertTrue(self.os_ken_app.watchers) + self.assertFalse(self.os_ken_app._config_files_changed()) # Load a new FAUCET config. 
self._write_config(os.environ['FAUCET_CONFIG'], faucet_conf2) - self.assertTrue(self.ryu_app._config_files_changed()) - self.ryu_app.reload_config(None) - self.assertTrue(self.ryu_app.watchers) - self.assertFalse(self.ryu_app._config_files_changed()) + self.assertTrue(self.os_ken_app._config_files_changed()) + self.os_ken_app.reload_config(None) + self.assertTrue(self.os_ken_app.watchers) + self.assertFalse(self.os_ken_app._config_files_changed()) # Load an invalid Gauge config self._write_config(os.environ['GAUGE_CONFIG'], 'invalid') - self.assertTrue(self.ryu_app._config_files_changed()) - self.ryu_app.reload_config(None) - self.assertTrue(self.ryu_app.watchers) + self.assertTrue(self.os_ken_app._config_files_changed()) + self.os_ken_app.reload_config(None) + self.assertTrue(self.os_ken_app.watchers) # Keep trying to load a valid version. - self.assertTrue(self.ryu_app._config_files_changed()) + self.assertTrue(self.os_ken_app._config_files_changed()) # Load good Gauge config back self._write_config(os.environ['GAUGE_CONFIG'], gauge_conf) - self.assertTrue(self.ryu_app._config_files_changed()) - self.ryu_app.reload_config(None) - self.assertTrue(self.ryu_app.watchers) - self.assertFalse(self.ryu_app._config_files_changed()) + self.assertTrue(self.os_ken_app._config_files_changed()) + self.os_ken_app.reload_config(None) + self.assertTrue(self.os_ken_app.watchers) + self.assertFalse(self.os_ken_app._config_files_changed()) if __name__ == "__main__": From 8c479fc16cbf292deb9c21f543321c958d82830b Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 28 Oct 2021 20:11:20 +0000 Subject: [PATCH 110/231] req. 
--- requirements.txt | 2 +- test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index 64096ed209..bd35d051ec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ c65beka==1.0.0 c65chewie==1.0.2 -os_ken==2.1.0 +os_ken==2.2.0 influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 diff --git a/test-requirements.txt b/test-requirements.txt index eb0319508a..c4f4038c1d 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -10,5 +10,5 @@ packaging requests requirements-parser scapy==2.4.4 -webob -tinyrpc +webob==1.8.7 +tinyrpc==1.1.2 From b279f06c2e47c313bbf755227884dbab36f5ee20 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 29 Oct 2021 21:51:56 +0000 Subject: [PATCH 111/231] prometheus client. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bd35d051ec..d34b6f02a6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,6 @@ influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 -prometheus_client==0.11.0 +prometheus_client==0.12.0 pyyaml==6.0 pytricia From fa52955ddd11b0cdd686c6b6c6063e3d30c64d29 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 30 Oct 2021 06:57:19 +0000 Subject: [PATCH 112/231] pylint. 
--- ofctl_rest/ofctl_rest.py | 24 +++++++++++++++-------- ofctl_rest/wsgi.py | 41 ++++++++++++++++++---------------------- 2 files changed, 34 insertions(+), 31 deletions(-) diff --git a/ofctl_rest/ofctl_rest.py b/ofctl_rest/ofctl_rest.py index cae83a369e..10947a4520 100644 --- a/ofctl_rest/ofctl_rest.py +++ b/ofctl_rest/ofctl_rest.py @@ -44,6 +44,8 @@ ofproto_v1_3.OFP_VERSION: ofctl_v1_3, } +# pylint: disable=missing-function-docstring,disable=invalid-name,disable=missing-class-docstring,disable=too-few-public-methods,disable=unused-argument,disable=no-member + # REST API # @@ -290,7 +292,7 @@ def wrapper(self, req, *args, **kwargs): class StatsController(ControllerBase): def __init__(self, req, link, data, **config): - super(StatsController, self).__init__(req, link, data, **config) + super().__init__(req, link, data, **config) self.dpset = data['dpset'] self.waiters = data['waiters'] @@ -410,8 +412,9 @@ def get_port_desc(self, req, dp, ofctl, port_no=None, **kwargs): def get_role(self, req, dp, ofctl, **kwargs): return ofctl.get_role(dp, self.waiters) + @staticmethod @command_method - def mod_flow_entry(self, req, dp, ofctl, flow, cmd, **kwargs): + def mod_flow_entry(req, dp, ofctl, flow, cmd, **kwargs): cmd_convert = { 'add': dp.ofproto.OFPFC_ADD, 'modify': dp.ofproto.OFPFC_MODIFY, @@ -425,13 +428,15 @@ def mod_flow_entry(self, req, dp, ofctl, flow, cmd, **kwargs): ofctl.mod_flow_entry(dp, flow, mod_cmd) + @staticmethod @command_method - def delete_flow_entry(self, req, dp, ofctl, flow, **kwargs): + def delete_flow_entry(req, dp, ofctl, flow, **kwargs): flow = {'table_id': dp.ofproto.OFPTT_ALL} ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE) + @staticmethod @command_method - def mod_meter_entry(self, req, dp, ofctl, meter, cmd, **kwargs): + def mod_meter_entry(req, dp, ofctl, meter, cmd, **kwargs): cmd_convert = { 'add': dp.ofproto.OFPMC_ADD, 'modify': dp.ofproto.OFPMC_MODIFY, @@ -443,8 +448,9 @@ def mod_meter_entry(self, req, dp, ofctl, meter, cmd, 
**kwargs): ofctl.mod_meter_entry(dp, meter, mod_cmd) + @staticmethod @command_method - def mod_group_entry(self, req, dp, ofctl, group, cmd, **kwargs): + def mod_group_entry(req, dp, ofctl, group, cmd, **kwargs): cmd_convert = { 'add': dp.ofproto.OFPGC_ADD, 'modify': dp.ofproto.OFPGC_MODIFY, @@ -473,12 +479,14 @@ def mod_port_behavior(self, req, dp, ofctl, port_config, cmd, **kwargs): ofctl.mod_port_behavior(dp, port_config) + @staticmethod @command_method - def send_experimenter(self, req, dp, ofctl, exp, **kwargs): + def send_experimenter(req, dp, ofctl, exp, **kwargs): ofctl.send_experimenter(dp, exp) + @staticmethod @command_method - def set_role(self, req, dp, ofctl, role, **kwargs): + def set_role(req, dp, ofctl, role, **kwargs): ofctl.set_role(dp, role) @@ -490,7 +498,7 @@ class RestStatsApi(app_manager.OSKenApp): } def __init__(self, *args, **kwargs): - super(RestStatsApi, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.dpset = kwargs['dpset'] wsgi = kwargs['wsgi'] self.waiters = {} diff --git a/ofctl_rest/wsgi.py b/ofctl_rest/wsgi.py index 29fc1c6664..fb73ce7131 100644 --- a/ofctl_rest/wsgi.py +++ b/ofctl_rest/wsgi.py @@ -22,7 +22,6 @@ import six from tinyrpc.server import RPCServer from tinyrpc.dispatch import RPCDispatcher -from tinyrpc.dispatch import public as rpc_public from tinyrpc.protocols.jsonrpc import JSONRPCProtocol from tinyrpc.transports import ServerTransport, ClientTransport from tinyrpc.client import RPCClient @@ -30,13 +29,15 @@ import webob.exc from webob.request import Request as webob_Request from webob.response import Response as webob_Response +import eventlet.wsgi -from os_ken import cfg from os_ken.lib import hub HEX_PATTERN = r'0x[0-9a-z]+' DIGIT_PATTERN = r'[1-9][0-9]*' +# pylint: disable=missing-function-docstring,disable=invalid-name,disable=missing-class-docstring,disable=too-few-public-methods + def route(name, path, methods=None, requirements=None): def _route(controller_method): @@ -60,7 +61,7 @@ 
class Request(webob_Request): DEFAULT_CHARSET = "UTF-8" def __init__(self, environ, charset=DEFAULT_CHARSET, *args, **kwargs): - super(Request, self).__init__( + super().__init__( environ, charset=charset, *args, **kwargs) @@ -74,10 +75,10 @@ class Response(webob_Response): DEFAULT_CHARSET = "UTF-8" def __init__(self, charset=DEFAULT_CHARSET, *args, **kwargs): - super(Response, self).__init__(charset=charset, *args, **kwargs) + super().__init__(charset=charset, *args, **kwargs) -class WebSocketRegistrationWrapper(object): +class WebSocketRegistrationWrapper: def __init__(self, func, controller): self._controller = controller @@ -88,7 +89,7 @@ def __call__(self, ws): ws_manager = wsgi_application.websocketmanager ws_manager.add_connection(ws) try: - self._controller_method(ws) + self._controller_method(ws) # pylint: disable=not-callable finally: ws_manager.delete_connection(ws) @@ -96,14 +97,7 @@ def __call__(self, ws): class _AlreadyHandledResponse(Response): # XXX: Eventlet API should not be used directly. 
# https://github.com/benoitc/gunicorn/pull/2581 - from packaging import version - import eventlet - if version.parse(eventlet.__version__) >= version.parse("0.30.3"): - import eventlet.wsgi - _ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None) - else: - from eventlet.wsgi import ALREADY_HANDLED - _ALREADY_HANDLED = ALREADY_HANDLED + _ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None) def __call__(self, environ, start_response): return self._ALREADY_HANDLED @@ -129,7 +123,7 @@ def __websocket(self, req, **_): return _websocket -class ControllerBase(object): +class ControllerBase: special_vars = ['action', 'controller'] def __init__(self, req, link, data, **config): @@ -176,7 +170,7 @@ class WebSocketRPCServer(RPCServer): def __init__(self, ws, rpc_callback): dispatcher = RPCDispatcher() dispatcher.register_instance(rpc_callback) - super(WebSocketRPCServer, self).__init__( + super().__init__( WebSocketServerTransport(ws), JSONRPCProtocol(), dispatcher, @@ -184,7 +178,7 @@ def __init__(self, ws, rpc_callback): def serve_forever(self): try: - super(WebSocketRPCServer, self).serve_forever() + super().serve_forever() except WebSocketDisconnectedError: return @@ -203,6 +197,7 @@ def send_message(self, message, expect_reply=True): if expect_reply: return self.queue.get() + return None class WebSocketRPCClient(RPCClient): @@ -210,7 +205,7 @@ class WebSocketRPCClient(RPCClient): def __init__(self, ws): self.ws = ws self.queue = hub.Queue() - super(WebSocketRPCClient, self).__init__( + super().__init__( JSONRPCProtocol(), WebSocketClientTransport(ws, self.queue), ) @@ -226,10 +221,10 @@ def serve_forever(self): class wsgify_hack(webob.dec.wsgify): def __call__(self, environ, start_response): self.kwargs['start_response'] = start_response - return super(wsgify_hack, self).__call__(environ, start_response) + return super().__call__(environ, start_response) -class WebSocketManager(object): +class WebSocketManager: def __init__(self): 
self._connections = [] @@ -245,13 +240,13 @@ def broadcast(self, msg): connection.send(msg) -class WSGIApplication(object): +class WSGIApplication: def __init__(self, **config): self.config = config self.mapper = Mapper() self.registory = {} self._wsmanager = WebSocketManager() - super(WSGIApplication, self).__init__() + super().__init__() def _match(self, req): # Note: Invoke the new API, first. If the arguments unmatched, @@ -314,7 +309,7 @@ def websocketmanager(self): class WSGIServer(hub.WSGIServer): def __init__(self, application, host, port, **config): - super(WSGIServer, self).__init__((host, port), application, **config) + super().__init__((host, port), application, **config) def __call__(self): self.serve_forever() From 719bf012dfa64945f7bb81033ad756f5981486e3 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 30 Oct 2021 06:58:34 +0000 Subject: [PATCH 113/231] ofctl_rest/pylint. --- clib/mininet_test_topo.py | 3 ++- {ofctl_rest => clib/ofctl_rest}/ofctl_rest.py | 0 {ofctl_rest => clib/ofctl_rest}/wsgi.py | 0 3 files changed, 2 insertions(+), 1 deletion(-) rename {ofctl_rest => clib/ofctl_rest}/ofctl_rest.py (100%) rename {ofctl_rest => clib/ofctl_rest}/wsgi.py (100%) diff --git a/clib/mininet_test_topo.py b/clib/mininet_test_topo.py index 7abf6fdaaf..a2a7ae21c6 100644 --- a/clib/mininet_test_topo.py +++ b/clib/mininet_test_topo.py @@ -637,7 +637,8 @@ def stop(self): # pylint: disable=arguments-differ class FAUCET(BaseFAUCET): """Start a FAUCET controller.""" - START_ARGS = ['--ryu-app-lists=%s' % (os.path.dirname(os.path.realpath(__file__)) + '/../ofctl_rest/ofctl_rest.py')] + START_ARGS = ['--ryu-app-lists=%s' % (os.path.join(os.path.dirname( + os.path.realpath(__file__)), 'ofctl_rest/ofctl_rest.py'))] def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env, ctl_privkey, ctl_cert, ca_certs, diff --git a/ofctl_rest/ofctl_rest.py b/clib/ofctl_rest/ofctl_rest.py similarity index 100% rename from ofctl_rest/ofctl_rest.py rename to 
clib/ofctl_rest/ofctl_rest.py diff --git a/ofctl_rest/wsgi.py b/clib/ofctl_rest/wsgi.py similarity index 100% rename from ofctl_rest/wsgi.py rename to clib/ofctl_rest/wsgi.py From b1e925ede989fe55899d3f4a169073556547b14a Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 30 Oct 2021 07:10:44 +0000 Subject: [PATCH 114/231] pytype. --- clib/ofctl_rest/ofctl_rest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/clib/ofctl_rest/ofctl_rest.py b/clib/ofctl_rest/ofctl_rest.py index 10947a4520..2a99d0dedd 100644 --- a/clib/ofctl_rest/ofctl_rest.py +++ b/clib/ofctl_rest/ofctl_rest.py @@ -291,6 +291,8 @@ def wrapper(self, req, *args, **kwargs): class StatsController(ControllerBase): + # pytype: disable=no-attribute + def __init__(self, req, link, data, **config): super().__init__(req, link, data, **config) self.dpset = data['dpset'] From c90c102d5b2e734fa509ac886a144012fc365f39 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 30 Oct 2021 07:12:23 +0000 Subject: [PATCH 115/231] error. --- clib/ofctl_rest/ofctl_rest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/ofctl_rest/ofctl_rest.py b/clib/ofctl_rest/ofctl_rest.py index 2a99d0dedd..b2b136b8f2 100644 --- a/clib/ofctl_rest/ofctl_rest.py +++ b/clib/ofctl_rest/ofctl_rest.py @@ -291,7 +291,7 @@ def wrapper(self, req, *args, **kwargs): class StatsController(ControllerBase): - # pytype: disable=no-attribute + # pytype: disable=attribute-error def __init__(self, req, link, data, **config): super().__init__(req, link, data, **config) From 48b54744cde9d1949ad412553a2c9bf6eabf0625 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 31 Oct 2021 01:53:50 +0000 Subject: [PATCH 116/231] new pytype. 
--- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index 396b816c93..fdb5a6b234 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ -r docs/requirements.txt flake8==4.0.1 pylint==2.11.1 -pytype==2021.10.18 +pytype==2021.10.25 From 3e216487c96799e86baddea80aa4b4830e65f095 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 31 Oct 2021 01:56:45 +0000 Subject: [PATCH 117/231] unused consts. --- clib/ofctl_rest/wsgi.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/clib/ofctl_rest/wsgi.py b/clib/ofctl_rest/wsgi.py index fb73ce7131..f385a7a2fd 100644 --- a/clib/ofctl_rest/wsgi.py +++ b/clib/ofctl_rest/wsgi.py @@ -33,9 +33,6 @@ from os_ken.lib import hub -HEX_PATTERN = r'0x[0-9a-z]+' -DIGIT_PATTERN = r'[1-9][0-9]*' - # pylint: disable=missing-function-docstring,disable=invalid-name,disable=missing-class-docstring,disable=too-few-public-methods From 22b710d3f2366624aa08258513e5530a0263b75a Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 2 Nov 2021 19:27:15 +0000 Subject: [PATCH 118/231] upgrade prom and grafana. --- docker-compose.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index ebcb0214f5..7398de11d9 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -17,7 +17,7 @@ services: prometheus: restart: always - image: 'prom/prometheus:v2.28.1' + image: 'prom/prometheus:v2.31.0' user: 'root' ports: - '9090:9090' @@ -31,7 +31,7 @@ services: grafana: restart: always - image: 'grafana/grafana:8.1.6' + image: 'grafana/grafana:8.2.2' user: 'root' ports: - '3000:3000' From 50f464674e4c963a2b36837576e5438b10b13769 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 9 Nov 2021 00:40:22 +0000 Subject: [PATCH 119/231] osken 2.3. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d34b6f02a6..30a2bdee28 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ c65beka==1.0.0 c65chewie==1.0.2 -os_ken==2.2.0 +os_ken==2.3.0 influxdb>=2.12.0 msgpack==1.0.2 networkx>=1.9 From e5a2487e00588cb3cf29228e3f508d31aa1b2dda Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 14 Nov 2021 18:47:07 +1300 Subject: [PATCH 120/231] pytype. --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index 49216009b3..cd675e0a6c 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ -r docs/requirements.txt flake8==4.0.1 pylint==2.11.1 -pytype==2021.11.2 +pytype==2021.11.12 From 93f19a63673d1ed7c2fc280d2fb38d8e9a2f9fe5 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 14 Nov 2021 20:29:58 +0000 Subject: [PATCH 121/231] pytricia. --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 00cb2bc7c1..30a2bdee28 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,3 +7,4 @@ networkx>=1.9 pbr==5.5.1 prometheus_client==0.12.0 pyyaml==6.0 +pytricia From 75c4e8c5d13ce426442e9480e6b2113b651b3b08 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 22 Nov 2021 00:11:35 +0000 Subject: [PATCH 122/231] migrate to ruamel.yaml. 
--- clib/clib_mininet_test_main.py | 3 +- clib/mininet_test_base.py | 13 +++---- clib/mininet_test_base_topo.py | 3 +- clib/valve_test_lib.py | 18 +++++++--- faucet/config_parser_util.py | 49 ++++++-------------------- requirements.txt | 2 +- tests/generative/unit/test_topology.py | 7 ++-- tests/integration/mininet_tests.py | 5 ++- tests/unit/faucet/test_config.py | 6 ++-- tests/unit/faucet/test_valve.py | 15 ++++---- tests/unit/faucet/test_valve_config.py | 2 +- tests/unit/faucet/test_valve_egress.py | 2 +- tests/unit/faucet/test_valve_stack.py | 21 ++++++----- tests/unit/gauge/test_gauge.py | 5 ++- 14 files changed, 65 insertions(+), 86 deletions(-) diff --git a/clib/clib_mininet_test_main.py b/clib/clib_mininet_test_main.py index 6ae6dbc6ea..06964348a1 100755 --- a/clib/clib_mininet_test_main.py +++ b/clib/clib_mininet_test_main.py @@ -43,6 +43,7 @@ from mininet.clean import Cleanup from clib import mininet_test_util +from clib.valve_test_lib import yaml_load DEFAULT_HARDWARE = 'Open vSwitch' @@ -131,7 +132,7 @@ def import_hw_config(): sys.exit(-1) try: with open(config_file_name, 'r', encoding='utf-8') as config_file: - config = yaml.safe_load(config_file) + config = yaml_load(config_file) except IOError: print(f'Could not load YAML config data from {config_file_name}') sys.exit(-1) diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index f7a91b08d2..ca1fd66dd9 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -21,7 +21,6 @@ import tempfile import time import unittest -import yaml import netaddr import requests @@ -37,6 +36,8 @@ from clib import mininet_test_topo from clib.mininet_test_topo import FaucetLink from clib.tcpdump_helper import TcpdumpHelper +from clib.valve_test_lib import yaml_load, yaml_dump + MAX_TEST_VID = 512 OFPVID_PRESENT = 0x1000 @@ -312,7 +313,7 @@ def _wait_until_matching_event(self, match_func, timeout=30): @staticmethod def _read_yaml(yaml_path): with open(yaml_path, encoding='utf-8') as 
yaml_file: - content = yaml.safe_load(yaml_file.read()) + content = yaml_load(yaml_file.read()) return content def _get_faucet_conf(self): @@ -347,7 +348,7 @@ def _annotate_interfaces_conf(yaml_conf): @staticmethod def _write_yaml_conf(yaml_path, yaml_conf): assert isinstance(yaml_conf, dict) - new_conf_str = yaml.dump(yaml_conf).encode() + new_conf_str = yaml_dump(yaml_conf).encode() with tempfile.NamedTemporaryFile( prefix=os.path.basename(yaml_path), dir=os.path.dirname(yaml_path), @@ -372,8 +373,8 @@ def _init_faucet_config(self): for config_var in (self.config_ports, self.port_map): config_vars.update(config_var) faucet_config = faucet_config % config_vars - yaml_conf = self._annotate_interfaces_conf(yaml.safe_load(faucet_config)) - self._write_yaml_conf(self.faucet_config_path, yaml_conf) + yaml_conf = yaml_dump(self._annotate_interfaces_conf(yaml_load(faucet_config))) + self._write_yaml_conf(self.faucet_config_path, yaml_load(yaml_conf)) def _init_gauge_config(self): gauge_config = self.get_gauge_config( @@ -383,7 +384,7 @@ def _init_gauge_config(self): self.monitor_flow_table_dir) if self.config_ports: gauge_config = gauge_config % self.config_ports - self._write_yaml_conf(self.gauge_config_path, yaml.safe_load(gauge_config)) + self._write_yaml_conf(self.gauge_config_path, yaml_load(gauge_config)) def _test_name(self): return mininet_test_util.flat_test_name(self.id()) diff --git a/clib/mininet_test_base_topo.py b/clib/mininet_test_base_topo.py index a84692ebc1..85e30fc497 100644 --- a/clib/mininet_test_base_topo.py +++ b/clib/mininet_test_base_topo.py @@ -12,6 +12,7 @@ from clib.mininet_test_util import timeout_cmd from clib.mininet_test_base import FaucetTestBase, IPV4_ETH from clib.config_generator import FaucetTopoGenerator +from clib.valve_test_lib import yaml_load class FaucetTopoTestBase(FaucetTestBase): @@ -55,7 +56,7 @@ def _init_faucet_config(self): for config_var in (self.config_ports, self.port_map): config_vars.update(config_var) faucet_config 
= self.CONFIG % config_vars - self._write_yaml_conf(self.faucet_config_path, yaml.safe_load(faucet_config)) + self._write_yaml_conf(self.faucet_config_path, yaml_load(faucet_config)) def _annotate_interfaces_conf(self, yaml_conf): """We don't need to annotate the interfaces""" diff --git a/clib/valve_test_lib.py b/clib/valve_test_lib.py index aced72e331..a3e4f4063e 100644 --- a/clib/valve_test_lib.py +++ b/clib/valve_test_lib.py @@ -32,7 +32,7 @@ import tempfile import unittest -import yaml +from ruamel.yaml.main import round_trip_load, round_trip_dump from os_ken.lib import mac from os_ken.lib.packet import ( @@ -57,6 +57,14 @@ from clib.fakeoftable import FakeOFNetwork +def yaml_load(yaml_str): + return round_trip_load(yaml_str) + + +def yaml_dump(yaml_dict): + return round_trip_dump(yaml_dict) + + def build_dict(pkt): """ Build and return a dictionary from a pkt @@ -1311,9 +1319,9 @@ def pkt_match(self, src, dst): } def _config_edge_learn_stack_root(self, new_value): - config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader) + config = yaml_load(self.CONFIG) config['vlans']['v100']['edge_learn_stack_root'] = new_value - return yaml.dump(config) + return yaml_dump(config) def learn_hosts(self): """Learn some hosts.""" @@ -2128,10 +2136,10 @@ def test_port_add_input(self): _ = self.valves_manager.valves[self.DP_ID] match = {'in_port': 1, 'vlan_vid': 0} - orig_config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader) + orig_config = yaml_load(self.CONFIG) deletedport1_config = copy.copy(orig_config) del deletedport1_config['dps'][self.DP_NAME]['interfaces']['p1'] - self.update_config(yaml.dump(deletedport1_config)) + self.update_config(yaml_dump(deletedport1_config)) self.assertFalse( self.network.tables[self.DP_ID].is_output(match, port=2, vid=self.V100), msg='Packet output after port delete') diff --git a/faucet/config_parser_util.py b/faucet/config_parser_util.py index 01f912be8d..9668dbe0cf 100644 --- a/faucet/config_parser_util.py +++ 
b/faucet/config_parser_util.py @@ -19,45 +19,17 @@ import hashlib import logging import os -# pytype: disable=pyi-error -import yaml -from yaml.constructor import ConstructorError - -# handle libyaml-dev not installed -try: - from yaml import CLoader as Loader # type: ignore -except ImportError: - from yaml import Loader +from ruamel.yaml import round_trip_load +from ruamel.yaml.constructor import DuplicateKeyError +from ruamel.yaml.scanner import ScannerError +from ruamel.yaml.composer import ComposerError CONFIG_HASH_FUNC = 'sha256' -class UniqueKeyLoader(Loader): # pylint: disable=too-many-ancestors - """YAML loader that will reject duplicate/overwriting keys.""" - - def construct_mapping(self, node, deep=False): - """Check for duplicate YAML keys.""" - try: - key_value_pairs = [ - (self.construct_object(key_node, deep=deep), - self.construct_object(value_node, deep=deep)) - for key_node, value_node in node.value] - except TypeError as err: - raise ConstructorError('invalid key type: %s' % err) from err - mapping = {} - for key, value in key_value_pairs: - try: - if key in mapping: - raise ConstructorError('duplicate key: %s' % key) - except TypeError as type_error: - raise ConstructorError('unhashable key: %s' % key) from type_error - mapping[key] = value - return mapping - - -yaml.SafeLoader.add_constructor( - yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, - UniqueKeyLoader.construct_mapping) +def yaml_load(yaml_str): + """Wrap YAML load library.""" + return round_trip_load(yaml_str) def get_logger(logname): @@ -74,9 +46,10 @@ def read_config(config_file, logname): try: with open(config_file, 'r', encoding='utf-8') as stream: conf_txt = stream.read() - conf = yaml.safe_load(conf_txt) - except (yaml.YAMLError, UnicodeDecodeError, - PermissionError, ValueError) as err: # pytype: disable=name-error + conf = yaml_load(conf_txt) + except (TypeError, UnicodeDecodeError, + PermissionError, ValueError, + ScannerError, DuplicateKeyError, ComposerError) as err: # 
pytype: disable=name-error logger.error('Error in file %s (%s)', config_file, str(err)) except FileNotFoundError as err: # pytype: disable=name-error logger.error('Could not find requested file: %s (%s)', config_file, str(err)) diff --git a/requirements.txt b/requirements.txt index 30a2bdee28..c7e5303440 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,5 +6,5 @@ msgpack==1.0.2 networkx>=1.9 pbr==5.5.1 prometheus_client==0.12.0 -pyyaml==6.0 +ruamel.yaml==0.17.17 pytricia diff --git a/tests/generative/unit/test_topology.py b/tests/generative/unit/test_topology.py index d95a1e6849..38659ecaaa 100755 --- a/tests/generative/unit/test_topology.py +++ b/tests/generative/unit/test_topology.py @@ -20,7 +20,6 @@ import random import unittest -import yaml from os_ken.lib import mac from os_ken.ofproto import ofproto_v1_3 as ofp @@ -28,7 +27,7 @@ import networkx from networkx.generators.atlas import graph_atlas_g -from clib.valve_test_lib import ValveTestBases +from clib.valve_test_lib import ValveTestBases, yaml_load, yaml_dump from clib.config_generator import FaucetFakeOFTopoGenerator @@ -131,7 +130,7 @@ def verify_flood_traversal(self): def verify_vlan_change(self): """Change host VLAN, check restart of rules consistent""" _, host_port_maps, _ = self.topo.create_port_maps() - yaml_config = yaml.safe_load(self.CONFIG) + yaml_config = yaml_load(self.CONFIG) intf_config = yaml_config['dps'][self.topo.switches_by_id[1]]['interfaces'] for host_i in host_port_maps: @@ -152,7 +151,7 @@ def verify_vlan_change(self): # Created a different VLAN so now stop searching break - new_config = yaml.dump(yaml_config) + new_config = yaml_dump(yaml_config) self.update_and_revert_config(self.CONFIG, new_config, None) def validate_topology_change(self): diff --git a/tests/integration/mininet_tests.py b/tests/integration/mininet_tests.py index 6896ec0090..c5fe3a18f3 100644 --- a/tests/integration/mininet_tests.py +++ b/tests/integration/mininet_tests.py @@ -26,8 +26,6 @@ import 
scapy.all -import yaml # pytype: disable=pyi-error - from mininet.log import error from mininet.util import pmonitor @@ -35,6 +33,7 @@ from clib import mininet_test_util from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH +from clib.valve_test_lib import yaml_load MIN_MBPS = 100 @@ -1672,7 +1671,7 @@ def _init_gauge_config(self): self.monitor_meter_stats_file) if self.config_ports: gauge_config = gauge_config % self.config_ports - self._write_yaml_conf(self.gauge_config_path, yaml.safe_load(gauge_config)) + self._write_yaml_conf(self.gauge_config_path, yaml_load(gauge_config)) def test_untagged(self): """All hosts on the same untagged VLAN should have connectivity.""" diff --git a/tests/unit/faucet/test_config.py b/tests/unit/faucet/test_config.py index b5043c9357..098ff295ae 100755 --- a/tests/unit/faucet/test_config.py +++ b/tests/unit/faucet/test_config.py @@ -2897,9 +2897,9 @@ def test_lacp_port_options_exclusivity(self): native_vlan: vlan100 lacp: 1 lacp_port_selected: True - lacp_selected = True - lacp_unselected = True - lacp_standby = True + lacp_selected: True + lacp_unselected: True + lacp_standby: True """ self.check_config_failure(config, cp.dp_parser) diff --git a/tests/unit/faucet/test_valve.py b/tests/unit/faucet/test_valve.py index a77e050ad5..6e9598cd00 100644 --- a/tests/unit/faucet/test_valve.py +++ b/tests/unit/faucet/test_valve.py @@ -22,7 +22,6 @@ import copy import time import unittest -import yaml from os_ken.lib import mac from os_ken.lib.packet import slow @@ -35,7 +34,7 @@ from clib.valve_test_lib import ( CONFIG, DP1_CONFIG, FAUCET_MAC, GROUP_DP1_CONFIG, IDLE_DP1_CONFIG, - ValveTestBases) + ValveTestBases, yaml_load, yaml_dump) from clib.fakeoftable import CONTROLLER_PORT @@ -755,10 +754,10 @@ def setUp(self): self.setup_valves(self.CONFIG) def test_soft(self): - config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader) + config = yaml_load(self.CONFIG) config['dps']['s1']['interfaces']['p1']['acls_in'] = ['acl2'] # We 
changed match conditions only, so this can be a warm start. - self.update_config(yaml.dump(config), reload_type='warm') + self.update_config(yaml_dump(config), reload_type='warm') class HardPipelineTestCase(ValveTestBases.ValveTestNetwork): @@ -795,10 +794,10 @@ def setUp(self): self.setup_valves(self.CONFIG) def test_hard(self): - config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader) + config = yaml_load(self.CONFIG) config['dps']['s1']['interfaces']['p1']['acls_in'] = ['acl2'] # Changed match conditions require restart. - self.update_config(yaml.dump(config), reload_type='cold') + self.update_config(yaml_dump(config), reload_type='cold') class ValveMirrorTestCase(ValveTestBases.ValveTestBig): @@ -886,9 +885,9 @@ def setUp(self): self.setup_valves(self.CONFIG) def test_unmirror(self): - config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader) + config = yaml_load(self.CONFIG) del config['dps']['s1']['interfaces']['p5']['mirror'] - self.update_config(yaml.dump(config), reload_type='warm') + self.update_config(yaml_dump(config), reload_type='warm') class ValvePortDescTestCase(ValveTestBases.ValveTestNetwork): diff --git a/tests/unit/faucet/test_valve_config.py b/tests/unit/faucet/test_valve_config.py index 3a72490a61..719cf32968 100755 --- a/tests/unit/faucet/test_valve_config.py +++ b/tests/unit/faucet/test_valve_config.py @@ -875,7 +875,7 @@ def test_vlan_acl_deny(self): faucet_mac: '{mac}' faucet_vips: ['{v200_vip}/64'] acl_out: drop_non_allow_host_v6 - minimum_ip_size_check: no + minimum_ip_size_check: false routers: r_v100_v200: vlans: [v100, v200] diff --git a/tests/unit/faucet/test_valve_egress.py b/tests/unit/faucet/test_valve_egress.py index 61f28c2095..2e47542cb0 100755 --- a/tests/unit/faucet/test_valve_egress.py +++ b/tests/unit/faucet/test_valve_egress.py @@ -73,7 +73,7 @@ def test_vlan_acl_deny(self): faucet_mac: '{mac}' faucet_vips: ['{v200_vip}/64'] acl_out: drop_non_allow_host_v6 - minimum_ip_size_check: no + minimum_ip_size_check: false routers: 
r_v100_v200: vlans: [v100, v200] diff --git a/tests/unit/faucet/test_valve_stack.py b/tests/unit/faucet/test_valve_stack.py index 6ee37d390f..338c294f1f 100644 --- a/tests/unit/faucet/test_valve_stack.py +++ b/tests/unit/faucet/test_valve_stack.py @@ -23,7 +23,6 @@ from functools import partial import unittest import ipaddress -import yaml from os_ken.lib import mac from os_ken.ofproto import ofproto_v1_3 as ofp @@ -36,7 +35,7 @@ from clib.fakeoftable import CONTROLLER_PORT from clib.valve_test_lib import ( - BASE_DP1_CONFIG, CONFIG, STACK_CONFIG, STACK_LOOP_CONFIG, ValveTestBases) + BASE_DP1_CONFIG, CONFIG, STACK_CONFIG, STACK_LOOP_CONFIG, ValveTestBases, yaml_load, yaml_dump) class ValveEdgeVLANTestCase(ValveTestBases.ValveTestNetwork): @@ -1276,12 +1275,12 @@ def test_update_stack_graph(self): def _set_max_lldp_lost(self, new_value): """Set the interface config option max_lldp_lost""" - config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader) + config = yaml_load(self.CONFIG) for dp in config['dps'].values(): for interface in dp['interfaces'].values(): if 'stack' in interface: interface['max_lldp_lost'] = new_value - return yaml.dump(config) + return yaml_dump(config) def test_max_lldp_timeout(self): """Check that timeout can be increased""" @@ -1740,9 +1739,9 @@ def test_update_src_tunnel(self): self.DP_ID, self.DP_ID, 1, 0, 3, self.SRC_ID, True, 'Did not encapsulate and forward') - new_config_yaml = yaml.safe_load(self.CONFIG) + new_config_yaml = yaml_load(self.CONFIG) new_config_yaml['dps']['s1']['interfaces'][1]['description'] = 'changed' - self.update_config(yaml.dump(new_config_yaml), reload_type=None) + self.update_config(yaml_dump(new_config_yaml), reload_type=None) self.activate_all_ports() # warm start with no topo change with tunnel. 
self.validate_tunnel( @@ -1950,9 +1949,9 @@ class ValveTestMultipleTunnel(ValveTestBases.ValveTestTunnel): """ def test_new_tunnel_source(self): - config = yaml.load(self.CONFIG, Loader=yaml.SafeLoader) + config = yaml_load(self.CONFIG) config['dps']['s1']['interfaces'][5]['acls_in'] = ['tunnel_acl'] - self.update_config(yaml.dump(config), reload_type='warm') + self.update_config(yaml_dump(config), reload_type='warm') self.activate_all_ports() self.test_tunnel_update_multiple_tunnels() @@ -4213,7 +4212,7 @@ def setUp(self): def test_stack(self): """Test getting config for stack with correct config""" dp = self.valves_manager.valves[1].dp - stack_conf = yaml.safe_load(dp.stack.to_conf()) + stack_conf = yaml_load(dp.stack.to_conf()) self.assertIsInstance(stack_conf, dict) self.assertIn('priority', stack_conf) self.assertIn('down_time_multiple', stack_conf) @@ -4226,8 +4225,8 @@ def test_stack(self): def test_dp_stack(self): """Test getting config for DP with correct subconfig stack""" dp = self.valves_manager.valves[1].dp - dp_conf = yaml.safe_load(dp.to_conf()) - stack_conf = yaml.safe_load(dp.stack.to_conf()) + dp_conf = yaml_load(dp.to_conf()) + stack_conf = yaml_load(dp.stack.to_conf()) self.assertIn('stack', dp_conf) self.assertIsInstance(dp_conf['stack'], dict) self.assertEqual(dp_conf['stack'], stack_conf) diff --git a/tests/unit/gauge/test_gauge.py b/tests/unit/gauge/test_gauge.py index dcc984225d..de5633cc06 100755 --- a/tests/unit/gauge/test_gauge.py +++ b/tests/unit/gauge/test_gauge.py @@ -16,8 +16,6 @@ from http.server import HTTPServer, BaseHTTPRequestHandler -import yaml - import requests from requests.exceptions import ReadTimeout @@ -30,6 +28,7 @@ from prometheus_client import CollectorRegistry from faucet import gauge, gauge_prom, gauge_influx, gauge_pollers, watcher, valve_util +from clib.valve_test_lib import yaml_load class QuietHandler(BaseHTTPRequestHandler): @@ -885,7 +884,7 @@ def test_flow_stats(self): 
"{}--flowtable--{}.json".format(datapath.name, rcv_time_str) ) - yaml_dict = yaml.safe_load(log_str)['OFPFlowStatsReply']['body'][0]['OFPFlowStats'] + yaml_dict = yaml_load(log_str)['OFPFlowStatsReply']['body'][0]['OFPFlowStats'] compare_flow_msg(msg, yaml_dict, self) From 41b0c9b3d1805383704ccaf7f0ba02b108968cee Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 22 Nov 2021 18:48:20 +0000 Subject: [PATCH 123/231] shim yaml in one place. --- clib/clib_mininet_test_main.py | 7 ++----- clib/config_generator.py | 7 +++---- clib/mininet_test_base_topo.py | 1 - clib/valve_test_lib.py | 10 +--------- faucet/config_parser_util.py | 7 ++++++- tests/generative/unit/test_topology.py | 3 ++- tests/integration/mininet_tests.py | 3 ++- tests/unit/faucet/test_valve.py | 3 ++- tests/unit/faucet/test_valve_stack.py | 3 ++- tests/unit/gauge/test_gauge.py | 2 +- 10 files changed, 21 insertions(+), 25 deletions(-) diff --git a/clib/clib_mininet_test_main.py b/clib/clib_mininet_test_main.py index 06964348a1..8fdadc1364 100755 --- a/clib/clib_mininet_test_main.py +++ b/clib/clib_mininet_test_main.py @@ -34,8 +34,6 @@ import traceback import unittest -import yaml - from packaging import version from concurrencytest import ConcurrentTestSuite, fork_for_tests @@ -43,7 +41,7 @@ from mininet.clean import Cleanup from clib import mininet_test_util -from clib.valve_test_lib import yaml_load +from clib.valve_test_lib import yaml_load, yaml_dump DEFAULT_HARDWARE = 'Open vSwitch' @@ -542,8 +540,7 @@ def report_results(results, hw_config, report_json_filename): ('OK', result.successes)) for test_status, test_list in test_lists: tests_json.update(report_tests(test_status, test_list, result)) - print(yaml.dump( - tests_json, default_flow_style=False, explicit_start=True, explicit_end=True)) + print(yaml_dump(tests_json)) if report_json_filename: report_json = { 'hw_config': hw_config, diff --git a/clib/config_generator.py b/clib/config_generator.py index 82cdd59757..41a670ac49 100644 --- 
a/clib/config_generator.py +++ b/clib/config_generator.py @@ -19,13 +19,12 @@ import random import string -import yaml - from mininet.log import output from mininet.topo import Topo from clib import mininet_test_util from clib.mininet_test_topo import FaucetHost, VLANHost, FaucetSwitch, NoControllerFaucetSwitch +from clib.valve_test_lib import yaml_dump class GenerationError(Exception): @@ -400,7 +399,7 @@ def get_dps_config(self, dp_options, host_options, link_options, ignored_switche def get_interface_config(link_name, src_port, dst_node, dst_port, vlans, options, ignored): interface_config = {} - type_ = 'switch-switch' if dst_port else 'switch-host' + _type = 'switch-switch' if dst_port else 'switch-host' if ignored: # Link is to an outside network, so treat it as a output only link with more # specific options defined in the options dictionary @@ -574,7 +573,7 @@ def get_config(self, n_vlans, acl_options=None, dp_options=None, host_options=No ignored_switches = [] config['dps'] = self.get_dps_config( dp_options, host_options, link_options, ignored_switches) - return yaml.dump(config, default_flow_style=False) + return yaml_dump(config) class FaucetFakeOFTopoGenerator(FaucetTopoGenerator): diff --git a/clib/mininet_test_base_topo.py b/clib/mininet_test_base_topo.py index 85e30fc497..202cab3805 100644 --- a/clib/mininet_test_base_topo.py +++ b/clib/mininet_test_base_topo.py @@ -7,7 +7,6 @@ import ipaddress import pprint from functools import partial -import yaml # pytype: disable=pyi-error from clib.mininet_test_util import timeout_cmd from clib.mininet_test_base import FaucetTestBase, IPV4_ETH diff --git a/clib/valve_test_lib.py b/clib/valve_test_lib.py index a3e4f4063e..4eb172612f 100644 --- a/clib/valve_test_lib.py +++ b/clib/valve_test_lib.py @@ -32,7 +32,6 @@ import tempfile import unittest -from ruamel.yaml.main import round_trip_load, round_trip_dump from os_ken.lib import mac from os_ken.lib.packet import ( @@ -52,19 +51,12 @@ from faucet import valve_of 
from faucet import valve_packet from faucet import valve_util +from faucet.config_parser_util import yaml_load, yaml_dump from faucet.valve import TfmValve from clib.fakeoftable import FakeOFNetwork -def yaml_load(yaml_str): - return round_trip_load(yaml_str) - - -def yaml_dump(yaml_dict): - return round_trip_dump(yaml_dict) - - def build_dict(pkt): """ Build and return a dictionary from a pkt diff --git a/faucet/config_parser_util.py b/faucet/config_parser_util.py index 9668dbe0cf..3bf6606798 100644 --- a/faucet/config_parser_util.py +++ b/faucet/config_parser_util.py @@ -19,7 +19,7 @@ import hashlib import logging import os -from ruamel.yaml import round_trip_load +from ruamel.yaml import round_trip_load, round_trip_dump from ruamel.yaml.constructor import DuplicateKeyError from ruamel.yaml.scanner import ScannerError from ruamel.yaml.composer import ComposerError @@ -32,6 +32,11 @@ def yaml_load(yaml_str): return round_trip_load(yaml_str) +def yaml_dump(yaml_dict): + """Wrap YAML dump library.""" + return round_trip_dump(yaml_dict) + + def get_logger(logname): """Return logger instance for config parsing.""" return logging.getLogger(logname + '.config') diff --git a/tests/generative/unit/test_topology.py b/tests/generative/unit/test_topology.py index 38659ecaaa..2c0d7d4ae8 100755 --- a/tests/generative/unit/test_topology.py +++ b/tests/generative/unit/test_topology.py @@ -27,9 +27,10 @@ import networkx from networkx.generators.atlas import graph_atlas_g -from clib.valve_test_lib import ValveTestBases, yaml_load, yaml_dump +from clib.valve_test_lib import ValveTestBases from clib.config_generator import FaucetFakeOFTopoGenerator +from faucet.config_parser_util import yaml_load, yaml_dump class ValveGenerativeBase(ValveTestBases.ValveTestNetwork): diff --git a/tests/integration/mininet_tests.py b/tests/integration/mininet_tests.py index c5fe3a18f3..fd48e1390e 100644 --- a/tests/integration/mininet_tests.py +++ b/tests/integration/mininet_tests.py @@ -29,11 +29,12 
@@ from mininet.log import error from mininet.util import pmonitor +from faucet.config_parser_util import yaml_load + from clib import mininet_test_base from clib import mininet_test_util from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH -from clib.valve_test_lib import yaml_load MIN_MBPS = 100 diff --git a/tests/unit/faucet/test_valve.py b/tests/unit/faucet/test_valve.py index 6e9598cd00..bbdd3225f9 100644 --- a/tests/unit/faucet/test_valve.py +++ b/tests/unit/faucet/test_valve.py @@ -31,10 +31,11 @@ from faucet import valve_of from faucet import valve_packet +from faucet.config_parser_util import yaml_load, yaml_dump from clib.valve_test_lib import ( CONFIG, DP1_CONFIG, FAUCET_MAC, GROUP_DP1_CONFIG, IDLE_DP1_CONFIG, - ValveTestBases, yaml_load, yaml_dump) + ValveTestBases) from clib.fakeoftable import CONTROLLER_PORT diff --git a/tests/unit/faucet/test_valve_stack.py b/tests/unit/faucet/test_valve_stack.py index 338c294f1f..374022af74 100644 --- a/tests/unit/faucet/test_valve_stack.py +++ b/tests/unit/faucet/test_valve_stack.py @@ -31,11 +31,12 @@ from faucet.port import ( STACK_STATE_INIT, STACK_STATE_UP, LACP_PORT_SELECTED, LACP_PORT_UNSELECTED) +from faucet.config_parser_util import yaml_load, yaml_dump from clib.fakeoftable import CONTROLLER_PORT from clib.valve_test_lib import ( - BASE_DP1_CONFIG, CONFIG, STACK_CONFIG, STACK_LOOP_CONFIG, ValveTestBases, yaml_load, yaml_dump) + BASE_DP1_CONFIG, CONFIG, STACK_CONFIG, STACK_LOOP_CONFIG, ValveTestBases) class ValveEdgeVLANTestCase(ValveTestBases.ValveTestNetwork): diff --git a/tests/unit/gauge/test_gauge.py b/tests/unit/gauge/test_gauge.py index de5633cc06..4422091421 100755 --- a/tests/unit/gauge/test_gauge.py +++ b/tests/unit/gauge/test_gauge.py @@ -28,7 +28,7 @@ from prometheus_client import CollectorRegistry from faucet import gauge, gauge_prom, gauge_influx, gauge_pollers, watcher, valve_util -from clib.valve_test_lib import yaml_load +from faucet.config_parser_util import yaml_load class 
QuietHandler(BaseHTTPRequestHandler): From f9eb9c3875017c586b072b9a30ac042ba654b714 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 22 Nov 2021 20:39:10 +0000 Subject: [PATCH 124/231] new style safe load/dump. --- faucet/config_parser_util.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/faucet/config_parser_util.py b/faucet/config_parser_util.py index 3bf6606798..76f8c1bcdb 100644 --- a/faucet/config_parser_util.py +++ b/faucet/config_parser_util.py @@ -19,22 +19,29 @@ import hashlib import logging import os -from ruamel.yaml import round_trip_load, round_trip_dump +from io import StringIO +from ruamel.yaml import YAML from ruamel.yaml.constructor import DuplicateKeyError from ruamel.yaml.scanner import ScannerError from ruamel.yaml.composer import ComposerError +from ruamel.yaml.constructor import ConstructorError +from ruamel.yaml.parser import ParserError CONFIG_HASH_FUNC = 'sha256' def yaml_load(yaml_str): """Wrap YAML load library.""" - return round_trip_load(yaml_str) + yml = YAML(typ='safe') + return yml.load(yaml_str) def yaml_dump(yaml_dict): """Wrap YAML dump library.""" - return round_trip_dump(yaml_dict) + with StringIO() as stream: + yml = YAML(typ='safe') + yml.dump(yaml_dict, stream=stream) + return stream.getvalue() def get_logger(logname): @@ -52,9 +59,8 @@ def read_config(config_file, logname): with open(config_file, 'r', encoding='utf-8') as stream: conf_txt = stream.read() conf = yaml_load(conf_txt) - except (TypeError, UnicodeDecodeError, - PermissionError, ValueError, - ScannerError, DuplicateKeyError, ComposerError) as err: # pytype: disable=name-error + except (TypeError, UnicodeDecodeError, PermissionError, ValueError, + ScannerError, DuplicateKeyError, ComposerError, ConstructorError, ParserError) as err: # pytype: disable=name-error logger.error('Error in file %s (%s)', config_file, str(err)) except FileNotFoundError as err: # pytype: disable=name-error logger.error('Could not find requested 
file: %s (%s)', config_file, str(err)) From 37e6ea7765e694ab549c19ad54489c45ac72aba7 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 23 Nov 2021 19:51:54 +0000 Subject: [PATCH 125/231] lint. --- clib/config_generator.py | 2 +- faucet/config_parser_util.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/clib/config_generator.py b/clib/config_generator.py index 41a670ac49..223991a2c0 100644 --- a/clib/config_generator.py +++ b/clib/config_generator.py @@ -438,7 +438,7 @@ def get_interface_config(link_name, src_port, dst_node, dst_port, vlans, options 'description': f'output only {link_name}', } else: - raise GenerationError(f'Unknown {type} link type {vlans}') + raise GenerationError(f'Unknown {_type} link type {vlans}') if options: for option_key, option_value in options.items(): interface_config[option_key] = option_value diff --git a/faucet/config_parser_util.py b/faucet/config_parser_util.py index 76f8c1bcdb..9862a48c77 100644 --- a/faucet/config_parser_util.py +++ b/faucet/config_parser_util.py @@ -60,7 +60,8 @@ def read_config(config_file, logname): conf_txt = stream.read() conf = yaml_load(conf_txt) except (TypeError, UnicodeDecodeError, PermissionError, ValueError, - ScannerError, DuplicateKeyError, ComposerError, ConstructorError, ParserError) as err: # pytype: disable=name-error + ScannerError, DuplicateKeyError, ComposerError, + ConstructorError, ParserError) as err: # pytype: disable=name-error logger.error('Error in file %s (%s)', config_file, str(err)) except FileNotFoundError as err: # pytype: disable=name-error logger.error('Could not find requested file: %s (%s)', config_file, str(err)) From ec95fe5a961514c83d55578d1c5665caa6130e93 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 25 Nov 2021 01:21:44 +0000 Subject: [PATCH 126/231] pytype. 
--- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index 8dc9132e36..dd0e3b9a10 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ -r docs/requirements.txt flake8==4.0.1 pylint==2.11.1 -pytype==2021.11.18 +pytype==2021.11.24 From a045d6441af765aeb0435d30f5104c398023074d Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 29 Nov 2021 18:34:07 +0000 Subject: [PATCH 127/231] pylint. --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index dd0e3b9a10..7f94039c86 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ -r docs/requirements.txt flake8==4.0.1 -pylint==2.11.1 +pylint==2.12.1 pytype==2021.11.24 From 1d341c77d83a005f736de7c83bf2af74a43f5a85 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 27 Dec 2021 21:56:48 +0000 Subject: [PATCH 128/231] upgrade ruamel. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c74cbe9960..5e76f97145 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr==5.5.1 prometheus_client==0.12.0 -ruamel.yaml==0.17.17 +ruamel.yaml==0.17.19 os_ken==2.3.0 pytricia>=1.0.0 c65beka==1.0.0 From 744e0aaaaa3d1d4ea6a407f06d302a40675a0f84 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 10 Jan 2022 19:29:43 +0000 Subject: [PATCH 129/231] ruamel.yaml '20. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5e76f97145..dfaab0dcc4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr==5.5.1 prometheus_client==0.12.0 -ruamel.yaml==0.17.19 +ruamel.yaml==0.17.20 os_ken==2.3.0 pytricia>=1.0.0 c65beka==1.0.0 From 4cbe59374f4c5718074cff3cad61dce364bb98cc Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 25 Jan 2022 21:57:55 +0000 Subject: [PATCH 130/231] prom '13. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dfaab0dcc4..9bb711fbe6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ os_ken==2.3.0 influxdb>=2.12.0 networkx>=1.9 pbr==5.5.1 -prometheus_client==0.12.0 +prometheus_client==0.13.0 ruamel.yaml==0.17.20 os_ken==2.3.0 pytricia>=1.0.0 From 1f03e888177d06e8174f8463f038c93c3a79aed3 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 7 Feb 2022 08:38:27 +0000 Subject: [PATCH 131/231] pbr. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3244a83a63..64c3bfa13a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ c65chewie==1.0.2 influxdb>=2.12.0 networkx>=1.9 -pbr==5.5.1 +pbr==5.8.1 prometheus_client==0.13.1 ruamel.yaml==0.17.20 os_ken==2.3.0 From 25aeaffedc3c41e8447ee9827d3ca69aef022f93 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 12 Feb 2022 21:45:53 +0000 Subject: [PATCH 132/231] ruamel.yaml 0.17.21 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 64c3bfa13a..627e0c51cb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr==5.8.1 prometheus_client==0.13.1 -ruamel.yaml==0.17.20 +ruamel.yaml==0.17.21 os_ken==2.3.0 c65beka==1.0.0 pytricia>=1.0.0 From 3cc54786283f4fcf921f72611a3b327380eea7a7 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 17 Feb 2022 19:41:55 +0000 Subject: [PATCH 133/231] osken 2.3.1. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 627e0c51cb..d345fd70c6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,6 @@ networkx>=1.9 pbr==5.8.1 prometheus_client==0.13.1 ruamel.yaml==0.17.21 -os_ken==2.3.0 +os_ken==2.3.1 c65beka==1.0.0 pytricia>=1.0.0 From 08d26972443a01acbd331761a0b583f00a867350 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 22 Feb 2022 21:52:00 +0000 Subject: [PATCH 134/231] test 3.10, but exclude pytype. 
--- .github/workflows/tests-unit.yml | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index fbaaf590bd..2ba3a27da1 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -5,6 +5,8 @@ on: [push, pull_request] env: FILES_CHANGED: "all" CODECOV_PY_VER: 3.8 + # TODO: pytype does not yet support python 3.10: https://github.com/google/pytype/issues/1022 + USING_PYTYPE: '3.7,3.8,3.9' jobs: unit-tests: @@ -12,7 +14,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7, 3.8, 3.9] + python-version: [3.7, 3.8, 3.9, '3.10'] steps: - name: Checkout repo uses: actions/checkout@v2 @@ -69,12 +71,14 @@ jobs: - if: ${{ env.FILES_CHANGED == 'all' || env.RQ_FILES_CHANGED || env.PY_FILES_CHANGED }} name: Run pytype run: | - ./docker/pip_deps.sh --extra-requirements="codecheck-requirements.txt" - cd ./tests/codecheck - if [[ "${{ env.FILES_CHANGED }}" == "all" || ! -z "${{ env.RQ_FILES_CHANGED }}" ]]; then - echo "Running pytype on everything" - ./pytype.sh - else - echo "Running pytype on ${{ env.PY_FILES_CHANGED }}" - ./pytype.sh ${{ env.PY_FILES_CHANGED }} + if ${{ contains(env.USING_PYTYPE, matrix.python-version) }} == 'true' ; then + ./docker/pip_deps.sh --extra-requirements="codecheck-requirements.txt" + cd ./tests/codecheck + if [[ "${{ env.FILES_CHANGED }}" == "all" || ! -z "${{ env.RQ_FILES_CHANGED }}" ]]; then + echo "Running pytype on everything" + ./pytype.sh + else + echo "Running pytype on ${{ env.PY_FILES_CHANGED }}" + ./pytype.sh ${{ env.PY_FILES_CHANGED }} + fi fi From 722d8333317bd996053ef4ad84aba1cafe671a87 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 23 Feb 2022 03:32:39 +0000 Subject: [PATCH 135/231] unify LRU cache limits, make flow verification cacheable. 
--- faucet/valve_of.py | 33 ++++++++++++-------------- faucet/valve_packet.py | 29 +++++++++++----------- faucet/valve_pipeline.py | 11 +++++---- faucet/valve_switch_standalone.py | 5 ++-- faucet/valve_table.py | 14 ++++++----- faucet/valve_util.py | 2 ++ tests/unit/faucet/test_valve_config.py | 7 +++--- 7 files changed, 52 insertions(+), 49 deletions(-) diff --git a/faucet/valve_of.py b/faucet/valve_of.py index 0f0dddfa12..2db1e9b2b1 100644 --- a/faucet/valve_of.py +++ b/faucet/valve_of.py @@ -36,6 +36,7 @@ from faucet.conf import test_config_condition, InvalidConfigError from faucet.valve_of_old import OLD_MATCH_FIELDS +from faucet.valve_util import LRU_MAX MIN_VID = 1 MAX_VID = 4095 @@ -393,7 +394,7 @@ def apply_meter(meter_id): return parser.OFPInstructionMeter(meter_id, ofp.OFPIT_METER) -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def _apply_actions(actions): return parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions) @@ -409,7 +410,7 @@ def apply_actions(actions): return _apply_actions(tuple(actions)) -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def goto_table(table): """Return instruction to goto table. @@ -421,7 +422,7 @@ def goto_table(table): return parser.OFPInstructionGotoTable(table.table_id) -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def goto_table_id(table_id): """Return instruction to goto table by table ID. @@ -448,7 +449,7 @@ def metadata_goto_table(metadata, mask, table): ] -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def set_field(**kwds): """Return action to set any field. @@ -482,7 +483,7 @@ def devid_present(vid): return vid ^ ofp.OFPVID_PRESENT -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def push_vlan_act(table, vlan_vid, eth_type=ether.ETH_TYPE_8021Q): """Return OpenFlow action list to push Ethernet 802.1Q header with VLAN VID. 
@@ -497,7 +498,6 @@ def push_vlan_act(table, vlan_vid, eth_type=ether.ETH_TYPE_8021Q): ] -@functools.lru_cache() def dec_ip_ttl(): """Return OpenFlow action to decrement IP TTL. @@ -507,7 +507,6 @@ def dec_ip_ttl(): return parser.OFPActionDecNwTtl() -@functools.lru_cache(maxsize=1024) def pop_vlan(): """Return OpenFlow action to pop outermost Ethernet 802.1Q VLAN header. @@ -550,7 +549,7 @@ def ct_nat(**kwds): return parser.NXActionNAT(**kwds) # pylint: disable=no-member -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def output_port(port_num, max_len=0): """Return OpenFlow action to output to a port. @@ -586,7 +585,7 @@ def dedupe_output_port_acts(output_port_acts): return [output_port(port) for port in sorted(output_ports)] -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def output_non_output_actions(flood_acts): """Split output actions into deduped actions, output ports, and non-output port actions. @@ -615,7 +614,6 @@ def output_non_output_actions(flood_acts): return (deduped_acts, output_ports, nonoutput_actions) -@functools.lru_cache() def output_in_port(): """Return OpenFlow action to output out input port. @@ -625,7 +623,7 @@ def output_in_port(): return output_port(OFP_IN_PORT) -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def output_controller(max_len=MAX_PACKET_IN_BYTES): """Return OpenFlow action to packet in to the controller. @@ -655,7 +653,7 @@ def packetouts(port_nums, data): data=data) -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def packetout(port_num, data): """Return OpenFlow action to packet out to dataplane from controller. @@ -668,7 +666,6 @@ def packetout(port_num, data): return packetouts([port_num], data) -@functools.lru_cache() def barrier(): """Return OpenFlow barrier request. 
@@ -694,7 +691,7 @@ def match(match_fields): return parser.OFPMatch(**match_fields) -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def valve_match_vid(value): return to_match_vid(value, ofp.OFPVID_PRESENT) @@ -772,7 +769,7 @@ def _match_ip_masked(ipa): return (str(ipa.ip), str(ipa.netmask)) -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def build_match_dict(in_port=None, vlan=None, eth_type=None, eth_src=None, eth_dst=None, eth_dst_mask=None, icmpv6_type=None, nw_proto=None, nw_dst=None, metadata=None, @@ -826,7 +823,7 @@ def build_match_dict(in_port=None, vlan=None, eth_type=None, eth_src=None, return match_dict -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def flowmod(cookie, command, table_id, priority, out_port, out_group, match_fields, inst, hard_timeout, idle_timeout, flags=0): return parser.OFPFlowMod( @@ -849,7 +846,7 @@ class NullRyuDatapath: ofproto = ofp -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def verify_flowmod(flowmod_msg): """Verify flowmod can be serialized.""" flowmod_msg.datapath = NullRyuDatapath() @@ -1016,7 +1013,7 @@ def is_global_meterdel(ofmsg): } -@functools.lru_cache() +@functools.lru_cache(maxsize=LRU_MAX) def _msg_kind(ofmsg): ofmsg_type = type(ofmsg) ofmsg_kind = _MSG_KINDS_TYPES.get(ofmsg_type, None) diff --git a/faucet/valve_packet.py b/faucet/valve_packet.py index 01aae4bc5b..16dea62cb6 100644 --- a/faucet/valve_packet.py +++ b/faucet/valve_packet.py @@ -33,6 +33,7 @@ from faucet import valve_util from faucet import valve_of +from faucet.valve_util import LRU_MAX FAUCET_MAC = '0e:00:00:00:00:01' # Default FAUCET MAC address @@ -72,7 +73,7 @@ MAC_MASK_BITMAP = {(2**EUI_BITS - 2**i): (EUI_BITS - i) for i in range(0, EUI_BITS + 1)} -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def mac_mask_bits(mac_mask): """Return number of bits in MAC mask or 0.""" if mac_mask is not None: @@ -80,20 +81,20 @@ def mac_mask_bits(mac_mask): 
return 0 -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def int_from_mac(mac): int_hi, int_lo = [int(i, 16) for i in mac.split(':')[-2:]] return (int_hi << 8) + int_lo -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def int_in_mac(mac, to_int): int_mac = mac.split(':')[:4] + [ '%x' % (to_int >> 8), '%x' % (to_int & 0xff)] return ':'.join(int_mac) -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def ipv4_parseable(ip_header_data): """Return True if an IPv4 packet we could parse.""" # TODO: python library parsers are fragile @@ -156,7 +157,7 @@ def parse_lldp(pkt): return pkt.get_protocol(lldp.lldp) -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def parse_packet_in_pkt(data, max_len, eth_pkt=None, vlan_pkt=None): """Parse a packet received via packet in from the dataplane. @@ -197,7 +198,7 @@ def parse_packet_in_pkt(data, max_len, eth_pkt=None, vlan_pkt=None): return (pkt, eth_pkt, eth_type, vlan_pkt, vlan_vid) -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def mac_addr_all_zeros(mac_addr): """Returns True if mac_addr is all zeros. @@ -210,7 +211,7 @@ def mac_addr_all_zeros(mac_addr): return mac_bin == DONTCARE -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def mac_addr_is_unicast(mac_addr): """Returns True if mac_addr is a unicast Ethernet address. @@ -378,7 +379,7 @@ def lacp_actor_up(lacp_pkt): return 0 -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def lacp_reqreply(eth_src, actor_system, actor_key, actor_port, actor_port_priority=0, @@ -461,7 +462,7 @@ def lacp_reqreply(eth_src, return pkt -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def arp_request(vid, eth_src, eth_dst, src_ip, dst_ip): """Return an ARP request packet. 
@@ -484,7 +485,7 @@ def arp_request(vid, eth_src, eth_dst, src_ip, dst_ip): return pkt -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def arp_reply(vid, eth_src, eth_dst, src_ip, dst_ip): """Return an ARP reply packet. @@ -530,7 +531,7 @@ def echo_reply(vid, eth_src, eth_dst, src_ip, dst_ip, data): return pkt -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def ipv6_link_eth_mcast(dst_ip): """Return an Ethernet multicast address from an IPv6 address. @@ -546,7 +547,7 @@ def ipv6_link_eth_mcast(dst_ip): return mcast_mac -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def ipv6_solicited_node_from_ucast(ucast): """Return IPv6 solicited node multicast address from IPv6 unicast address. @@ -563,7 +564,7 @@ def ipv6_solicited_node_from_ucast(ucast): return link_mcast -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def nd_request(vid, eth_src, eth_dst, src_ip, dst_ip): """Return IPv6 neighbor discovery request packet. @@ -596,7 +597,7 @@ def nd_request(vid, eth_src, eth_dst, src_ip, dst_ip): return pkt -@functools.lru_cache(maxsize=1024) +@functools.lru_cache(maxsize=LRU_MAX) def nd_advert(vid, eth_src, eth_dst, src_ip, dst_ip): """Return IPv6 neighbor avertisement packet. 
diff --git a/faucet/valve_pipeline.py b/faucet/valve_pipeline.py index 3833867e47..45c88f2b35 100644 --- a/faucet/valve_pipeline.py +++ b/faucet/valve_pipeline.py @@ -22,6 +22,7 @@ import faucet.faucet_metadata as faucet_md from faucet import valve_of from faucet.valve_manager_base import ValveManagerBase +from faucet.valve_util import LRU_MAX class ValvePipeline(ValveManagerBase): @@ -48,14 +49,14 @@ def __init__(self, dp): self.select_priority = self._HIGH_PRIORITY @staticmethod - @functools.lru_cache() + @functools.lru_cache(maxsize=LRU_MAX) def _accept_to_table(table, actions): inst = [table.goto_this()] if actions: inst.append(valve_of.apply_actions(actions)) return tuple(inst) - @functools.lru_cache() + @functools.lru_cache(maxsize=LRU_MAX) def accept_to_vlan(self, actions=None): """Get instructions to forward packet through the pipeline to vlan table. @@ -66,7 +67,7 @@ def accept_to_vlan(self, actions=None): """ return self._accept_to_table(self.vlan_table, actions) - @functools.lru_cache() + @functools.lru_cache(maxsize=LRU_MAX) def accept_to_classification(self, actions=None): """Get instructions to forward packet through the pipeline to classification table. @@ -77,7 +78,7 @@ def accept_to_classification(self, actions=None): """ return self._accept_to_table(self.classification_table, actions) - @functools.lru_cache() + @functools.lru_cache(maxsize=LRU_MAX) def accept_to_l2_forwarding(self, actions=None): """Get instructions to forward packet through the pipeline to l2 forwarding. 
@@ -88,7 +89,7 @@ def accept_to_l2_forwarding(self, actions=None): """ return self._accept_to_table(self.output_table, actions) - @functools.lru_cache() + @functools.lru_cache(maxsize=LRU_MAX) def accept_to_egress(self, actions=None): """Get instructions to forward packet through the pipeline to egress table diff --git a/faucet/valve_switch_standalone.py b/faucet/valve_switch_standalone.py index aee0a2a55b..3997f27672 100644 --- a/faucet/valve_switch_standalone.py +++ b/faucet/valve_switch_standalone.py @@ -26,6 +26,7 @@ from faucet import valve_packet from faucet.valve_manager_base import ValveManagerBase from faucet.vlan import NullVLAN +from faucet.valve_util import LRU_MAX class ValveSwitchManager(ValveManagerBase): # pylint: disable=too-many-public-methods @@ -119,7 +120,7 @@ def floods_to_root(_dp_obj): """Return True if the given dp floods (only) to root switch""" return False - @functools.lru_cache(maxsize=1024) + @functools.lru_cache(maxsize=LRU_MAX) def _mask_flood_priority(self, eth_dst_mask): return self.flood_priority + valve_packet.mac_mask_bits(eth_dst_mask) @@ -162,7 +163,7 @@ def _build_flood_rule(self, match, command, flood_acts, flood_priority): inst=(valve_of.apply_actions(flood_acts),), priority=flood_priority) - @functools.lru_cache(maxsize=1024) + @functools.lru_cache(maxsize=LRU_MAX) def _vlan_flood_priority(self, eth_type, eth_dst_mask): priority = self._mask_flood_priority(eth_dst_mask) if eth_type: diff --git a/faucet/valve_table.py b/faucet/valve_table.py index d1680fb915..40078df85a 100644 --- a/faucet/valve_table.py +++ b/faucet/valve_table.py @@ -21,6 +21,7 @@ import struct from faucet import valve_of from faucet.faucet_pipeline import ValveTableConfig +from faucet.valve_util import LRU_MAX class ValveTable: # pylint: disable=too-many-arguments,too-many-instance-attributes @@ -89,7 +90,7 @@ def set_vlan_vid(self, vlan_vid): # TODO: verify actions @staticmethod - @functools.lru_cache(maxsize=1024) + 
@functools.lru_cache(maxsize=LRU_MAX) def match(in_port=None, vlan=None, # pylint: disable=too-many-arguments eth_type=None, eth_src=None, eth_dst=None, eth_dst_mask=None, icmpv6_type=None, nw_proto=None, nw_dst=None, metadata=None, @@ -102,6 +103,7 @@ def match(in_port=None, vlan=None, # pylint: disable=too-many-arguments vlan_pcp, udp_src, udp_dst) return valve_of.match(match_dict) + @functools.lru_cache(maxsize=LRU_MAX) def _verify_flowmod(self, flowmod): match_fields = flowmod.match.items() if valve_of.is_flowdel(flowmod): @@ -138,13 +140,13 @@ def _trim_actions(self, actions): pending_actions = [] else: pending_actions.append(action) - set_fields = {action.key for action in new_actions if valve_of.is_set_field(action)} - if self.table_id != valve_of.ofp.OFPTT_ALL and set_fields: - assert set_fields.issubset(self.set_fields), ( + if self.table_id != valve_of.ofp.OFPTT_ALL: + set_fields = {action.key for action in new_actions if valve_of.is_set_field(action)} + assert not set_fields or set_fields.issubset(self.set_fields), ( f'unexpected set fields {set_fields} configured {self.set_fields} in {self.name}') return new_actions - @functools.lru_cache() + @functools.lru_cache(maxsize=LRU_MAX) def _trim_inst(self, inst): """Discard empty/actions on packets that are not output and not goto another table.""" inst_types = {instruction.type for instruction in inst} @@ -192,7 +194,7 @@ def flowmod(self, match=None, priority=None, # pylint: disable=too-many-argumen out_port, out_group, match, - tuple(inst), + inst, hard_timeout, idle_timeout, flags) diff --git a/faucet/valve_util.py b/faucet/valve_util.py index f57ee3d06d..1a7516e6a9 100644 --- a/faucet/valve_util.py +++ b/faucet/valve_util.py @@ -23,6 +23,8 @@ import sys from functools import wraps +LRU_MAX = 4096 + def kill_on_exception(logname): """decorator to ensure functions will kill ryu when an unhandled exception diff --git a/tests/unit/faucet/test_valve_config.py b/tests/unit/faucet/test_valve_config.py index 
719cf32968..8d6ad2e9ee 100755 --- a/tests/unit/faucet/test_valve_config.py +++ b/tests/unit/faucet/test_valve_config.py @@ -1013,8 +1013,6 @@ def load_orig_config(): load_orig_config() pstats_out, pstats_text = self.profile( partial(self.update_config, self.CONFIG, reload_type='cold')) - cache_info = valve_of.output_non_output_actions.cache_info() - self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info) total_tt_prop = ( pstats_out.total_tt / self.baseline_total_tt) # pytype: disable=attribute-error # must not be 20x slower, to ingest config for 100 interfaces than 1. @@ -1023,8 +1021,9 @@ def load_orig_config(): if total_tt_prop < 20: for valve in self.valves_manager.valves.values(): for table in valve.dp.tables.values(): - cache_info = table._trim_inst.cache_info() # pylint: disable=protected-access - self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info) + for cacheable_func in (table._trim_inst, table._verify_flowmod): # pylint: disable=protected-access + cache_info = cacheable_func.cache_info() + self.assertGreater(cache_info.hits, 0, msg=(table.name, cacheable_func, cache_info)) return time.sleep(i) From 0c14d818bbee7376149a3a07259604679ae1a457 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 8 Mar 2022 01:02:41 +0000 Subject: [PATCH 136/231] disable periodic. --- .github/workflows/{ => disabled}/periodic.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{ => disabled}/periodic.yml (100%) diff --git a/.github/workflows/periodic.yml b/.github/workflows/disabled/periodic.yml similarity index 100% rename from .github/workflows/periodic.yml rename to .github/workflows/disabled/periodic.yml From c81d2fe15af27555d3b5ade72c88c33de7244957 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 8 Mar 2022 03:26:10 +0000 Subject: [PATCH 137/231] psutil/physical cores, conditional del-br. 
--- clib/clib_mininet_test_main.py | 4 ++-- clib/mininet_test_base.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/clib/clib_mininet_test_main.py b/clib/clib_mininet_test_main.py index f0b50f16bf..53270a9fe4 100755 --- a/clib/clib_mininet_test_main.py +++ b/clib/clib_mininet_test_main.py @@ -21,9 +21,9 @@ import inspect import os import sys -import multiprocessing import pdb import pstats +import psutil import random import re import shutil @@ -364,7 +364,7 @@ def filter_test_hardware(test_obj, hw_config): def max_loadavg(): - return int(multiprocessing.cpu_count() * 1.5) + return int(psutil.cpu_count(logical=False) * 1.5) def expand_tests(modules, requested_test_classes, regex_test_classes, excluded_test_classes, diff --git a/clib/mininet_test_base.py b/clib/mininet_test_base.py index ca1fd66dd9..c885c7c17e 100644 --- a/clib/mininet_test_base.py +++ b/clib/mininet_test_base.py @@ -493,7 +493,7 @@ def tearDown(self, ignore_oferrors=False): for switch in self.net.switches: switch_names.append(switch.name) self.dump_switch_flows(switch) - switch.cmd(f'{self.VSCTL} del-br {switch.name}') + switch.cmd(f'{self.VSCTL} --if-exists del-br {switch.name}') self._stop_net() self.net = None if self.event_sock_dir and os.path.exists(self.event_sock_dir): From e9d555796d0b7a8ac69f55a554041d3146a16138 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 8 Mar 2022 03:28:55 +0000 Subject: [PATCH 138/231] psutil. --- test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test-requirements.txt b/test-requirements.txt index 61fd5f783e..bcdc658658 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,6 +7,7 @@ exabgp==4.2.17 importlab>=0.3.1 netifaces packaging +psutil==5.8.0 requests requirements-parser scapy==2.4.4 From 9a8bda31b36b79d696943e2ea5ba18691645c46e Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 8 Mar 2022 03:30:48 +0000 Subject: [PATCH 139/231] psutil. 
--- clib/clib_mininet_test_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/clib_mininet_test_main.py b/clib/clib_mininet_test_main.py index 53270a9fe4..b9defa6ee8 100755 --- a/clib/clib_mininet_test_main.py +++ b/clib/clib_mininet_test_main.py @@ -23,7 +23,6 @@ import sys import pdb import pstats -import psutil import random import re import shutil @@ -39,6 +38,7 @@ from concurrencytest import ConcurrentTestSuite, fork_for_tests from mininet.log import setLogLevel from mininet.clean import Cleanup +import psutil from clib import mininet_test_util from clib.valve_test_lib import yaml_load, yaml_dump From 56082a60261b705dba5c8f8b0c6271b38fc06fd7 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 8 Mar 2022 07:36:53 +0000 Subject: [PATCH 140/231] image name. --- .github/workflows/disabled/periodic.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/disabled/periodic.yml b/.github/workflows/disabled/periodic.yml index c67bf50e20..4591cb753f 100644 --- a/.github/workflows/disabled/periodic.yml +++ b/.github/workflows/disabled/periodic.yml @@ -7,7 +7,7 @@ on: - cron: '0 0,12 * * *' env: - FAUCET_TEST_IMG: "faucet/tests" + FAUCET_TEST_IMG: "c65sdn/tests" SHARDARGS: "--privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 --ulimit core=99999999999:99999999999 -v /var/local/lib/docker:/var/lib/docker -v /var/tmp:/var/tmp" jobs: From 0e6842df160f3f8238e0da932de41996a9ca60c2 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 8 Mar 2022 07:53:24 +0000 Subject: [PATCH 141/231] reenable periodic. 
--- .github/workflows/{disabled => }/periodic.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{disabled => }/periodic.yml (100%) diff --git a/.github/workflows/disabled/periodic.yml b/.github/workflows/periodic.yml similarity index 100% rename from .github/workflows/disabled/periodic.yml rename to .github/workflows/periodic.yml From a2662528e94bbe849f999b867723ce02b6388bee Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 16 Mar 2022 18:42:59 +0000 Subject: [PATCH 142/231] disable periodic. --- .github/workflows/{ => disabled}/periodic.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{ => disabled}/periodic.yml (100%) diff --git a/.github/workflows/periodic.yml b/.github/workflows/disabled/periodic.yml similarity index 100% rename from .github/workflows/periodic.yml rename to .github/workflows/disabled/periodic.yml From bef3261ec26e368715f38ed04dcc82caaa15316e Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 30 Mar 2022 18:43:55 +0000 Subject: [PATCH 143/231] remove noise. 
--- "\033\033" | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 "\033\033" diff --git "a/\033\033" "b/\033\033" deleted file mode 100644 index 8444d7668a..0000000000 --- "a/\033\033" +++ /dev/null @@ -1,30 +0,0 @@ -diff --cc Dockerfile.faucet -index cac72a16,422b7bba..00000000 ---- a/Dockerfile.faucet -+++ b/Dockerfile.faucet -@@@ -1,6 -1,6 +1,10 @@@ - ## Image name: faucet/faucet -  -++<<<<<<< HEAD - +FROM c65sdn/python3:latest -++======= -+ FROM faucet/python3:8.0.0 -++>>>>>>> upstream/master -  - COPY ./ /faucet-src/ -  -diff --cc adapters/vendors/rabbitmq/Dockerfile -index 18f599b6,9f9c70e9..00000000 ---- a/adapters/vendors/rabbitmq/Dockerfile -+++ b/adapters/vendors/rabbitmq/Dockerfile -@@@ -1,6 -1,6 +1,10 @@@ - ## Image name: faucet/event-adapter-rabbitmq -  -++<<<<<<< HEAD - +FROM c65sdn/base:latest -++======= -+ FROM faucet/python3:8.0.0 -++>>>>>>> upstream/master - LABEL maintainer="Charlie Lewis " -  - ENV PYTHONUNBUFFERED=0 From 8e825563e9f114e8d61f5362d8f1373358d0425c Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 6 Apr 2022 00:00:46 +0000 Subject: [PATCH 144/231] prom 0.14.0. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d345fd70c6..09dd9b6a4b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ c65chewie==1.0.2 influxdb>=2.12.0 networkx>=1.9 pbr==5.8.1 -prometheus_client==0.13.1 +prometheus_client==0.14.0 ruamel.yaml==0.17.21 os_ken==2.3.1 c65beka==1.0.0 From 7101c8e4ebe084bf3cee28900e12c436553bf997 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 8 Apr 2022 22:40:45 +0000 Subject: [PATCH 145/231] prom client 0.14.1 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 09dd9b6a4b..85a12eeb0a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ c65chewie==1.0.2 influxdb>=2.12.0 networkx>=1.9 pbr==5.8.1 -prometheus_client==0.14.0 +prometheus_client==0.14.1 ruamel.yaml==0.17.21 os_ken==2.3.1 c65beka==1.0.0 From e4ea5a38c9865aaed2409f8e14a3078ee6067944 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 8 Apr 2022 22:45:26 +0000 Subject: [PATCH 146/231] influxdb 2.2 --- docker-compose.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index e45b477f17..f11de01bc4 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -3,7 +3,7 @@ version: '2' services: influxdb: restart: always - image: 'influxdb:2.1' + image: 'influxdb:2.2' ports: - '8086' - '8083' From 6c2cb67178d5e9547187c602fce31bec73789a96 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 9 Apr 2022 10:52:15 +0000 Subject: [PATCH 147/231] grafana 8.4.5. 
--- docker-compose.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index f11de01bc4..d889f9333b 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -31,7 +31,7 @@ services: grafana: restart: always - image: 'grafana/grafana:8.4.4' + image: 'grafana/grafana:8.4.5' user: 'root' ports: - '3000:3000' From 087a9db4f8c1ca6947c26bb1eb9aa2732c92c43a Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 6 May 2022 05:36:00 +0000 Subject: [PATCH 148/231] Prevent MAC flush test from flaking. --- tests/integration/mininet_tests.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/integration/mininet_tests.py b/tests/integration/mininet_tests.py index f0553af48d..847069eea2 100644 --- a/tests/integration/mininet_tests.py +++ b/tests/integration/mininet_tests.py @@ -3555,12 +3555,11 @@ def test_port_change_vlan(self): self.change_port_config( self.port_map['port_2'], 'native_vlan', 200, restart=True, cold_start=False) - for port_name in ('port_1', 'port_2'): - self.wait_until_matching_flow( - {'in_port': int(self.port_map[port_name])}, - table_id=self._VLAN_TABLE, - actions=['SET_FIELD: {vlan_vid:4296}']) - self.assertEqual(0, len(self.scrape_prometheus(var='learned_l2_port'))) + self.wait_until_matching_flow( + {'in_port': int(self.port_map['port_2'])}, + table_id=self._VLAN_TABLE, + actions=['SET_FIELD: {vlan_vid:4296}']) + self.assertLess(4, len(self.scrape_prometheus(var='learned_l2_port'))) class FaucetConfigReloadEmptyAclTest(FaucetConfigReloadTestBase): From 7f7bf04cab5645170a3d7a9ea15e3d8a32033436 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 6 May 2022 06:01:18 +0000 Subject: [PATCH 149/231] sense. 
--- tests/integration/mininet_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration/mininet_tests.py b/tests/integration/mininet_tests.py index 847069eea2..c012f02dad 100644 --- a/tests/integration/mininet_tests.py +++ b/tests/integration/mininet_tests.py @@ -3559,7 +3559,7 @@ def test_port_change_vlan(self): {'in_port': int(self.port_map['port_2'])}, table_id=self._VLAN_TABLE, actions=['SET_FIELD: {vlan_vid:4296}']) - self.assertLess(4, len(self.scrape_prometheus(var='learned_l2_port'))) + self.assertLess(len(self.scrape_prometheus(var='learned_l2_port')), 4) class FaucetConfigReloadEmptyAclTest(FaucetConfigReloadTestBase): From 2898f01769bf28ea0d9278c344e0107aaeb23cba Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 23 May 2022 15:04:33 +0000 Subject: [PATCH 150/231] osken 2.4.0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d52fbcfdde..623ecbd5b3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,6 @@ networkx>=1.9 pbr>=1.9 prometheus_client==0.14.1 ruamel.yaml==0.17.21 -os_ken==2.3.1 +os_ken==2.4.0 c65beka==1.0.0 pytricia>=1.0.0 From 16ece51d386cf9ee6bd4af44607695c3fcfd4383 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 23 May 2022 15:08:29 +0000 Subject: [PATCH 151/231] remove 3.7. 
--- .github/workflows/tests-unit.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 6d2f97655e..cb1abceb09 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -5,7 +5,7 @@ on: [push, pull_request] env: FILES_CHANGED: "all" CODECOV_PY_VER: 3.8 - USING_PYTYPE: '3.7,3.8,3.9,3.10' + USING_PYTYPE: '3.8,3.9,3.10' jobs: unit-tests: @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7, 3.8, 3.9, '3.10'] + python-version: [3.8, 3.9, '3.10'] steps: - name: Checkout repo uses: actions/checkout@v3 From 585e7f324b47edf5758f32cd0ff0147b7dfc2748 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 23 May 2022 17:19:07 +0000 Subject: [PATCH 152/231] pylint. --- tests/integration/mininet_tests.py | 49 ++++++++++++++++-------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/tests/integration/mininet_tests.py b/tests/integration/mininet_tests.py index c012f02dad..2fd454cf8a 100644 --- a/tests/integration/mininet_tests.py +++ b/tests/integration/mininet_tests.py @@ -2875,7 +2875,7 @@ class FaucetSingleL3LearnMACsOnPortTest(FaucetUntaggedTest): def _max_hosts(): # pylint: disable=no-method-argument,no-self-use return 512 - MAX_HOSTS = _max_hosts() + MAX_HOSTS = _max_hosts() # pylint: disable=too-many-function-args TEST_IPV4_NET = '10.0.0.0' TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4 LEARN_IPV4 = '10.0.254.254' @@ -2885,7 +2885,7 @@ def _max_hosts(): # pylint: disable=no-method-argument,no-self-use description: "untagged" max_hosts: %u faucet_vips: ["10.0.254.254/16"] -""" % (_max_hosts() + 4) +""" % (_max_hosts() + 4) # pylint: disable=too-many-function-args CONFIG = (""" ignore_learn_ins: 0 @@ -2894,7 +2894,10 @@ def _max_hosts(): # pylint: disable=no-method-argument,no-self-use eth_src: %u eth_dst: %u ipv4_fib: %u -""" % (_max_hosts() + 64, _max_hosts() + 64, _max_hosts() + 64) + 
""" +""" % (_max_hosts() + 64, # pylint: disable=too-many-function-args + _max_hosts() + 64, # pylint: disable=too-many-function-args + _max_hosts() + 64 # pylint: disable=too-many-function-args + ) + """ interfaces: %(port_1)d: native_vlan: 100 @@ -2923,7 +2926,7 @@ class FaucetSingleL2LearnMACsOnPortTest(FaucetUntaggedTest): def _max_hosts(): # pylint: disable=no-method-argument,no-self-use return 1024 - MAX_HOSTS = _max_hosts() + MAX_HOSTS = _max_hosts() # pylint: disable=too-many-function-args TEST_IPV4_NET = '10.0.0.0' TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4 LEARN_IPV4 = '10.0.0.1' @@ -2932,7 +2935,7 @@ def _max_hosts(): # pylint: disable=no-method-argument,no-self-use 100: description: "untagged" max_hosts: %u -""" % (_max_hosts() + 4) +""" % (_max_hosts() + 4) # pylint: disable=too-many-function-args CONFIG = (""" ignore_learn_ins: 0 @@ -2940,7 +2943,9 @@ def _max_hosts(): # pylint: disable=no-method-argument,no-self-use table_sizes: eth_src: %u eth_dst: %u -""" % (_max_hosts() + 64, _max_hosts() + 64) + """ +""" % (_max_hosts() + 64, # pylint: disable=too-many-function-args + _max_hosts() + 64 # pylint: disable=too-many-function-args + ) + """ interfaces: %(port_1)d: native_vlan: 100 @@ -5833,9 +5838,9 @@ def global_vid(): # pylint: disable=no-method-argument,no-self-use NETPREFIX = 24 ETH_TYPE = IPV4_ETH NETNS = True - VIDS = _vids() - GLOBAL_VID = global_vid() - STR_VIDS = [str(i) for i in _vids()] + VIDS = _vids() # pylint: disable=too-many-function-args + GLOBAL_VID = global_vid() # pylint: disable=too-many-function-args + STR_VIDS = [str(i) for i in _vids()] # pylint: disable=too-many-function-args NEW_VIDS = VIDS[1:] @staticmethod @@ -5886,7 +5891,7 @@ def run_ip(self, args): native_vlan: 99 tagged_vlans: [%s] hairpin_unicast: True -""" % (global_vid(), +""" % (global_vid(), # pylint: disable=too-many-function-args len(STR_VIDS) * 3, # VLAN len(STR_VIDS) * 2, # VIP len(STR_VIDS) * 12, # Flood @@ -6015,9 +6020,9 @@ def _vids(): # 
pylint: disable=no-method-argument,no-self-use def global_vid(): # pylint: disable=no-method-argument,no-self-use return 2047 - VIDS = _vids() - GLOBAL_VID = global_vid() - STR_VIDS = [str(i) for i in _vids()] + VIDS = _vids() # pylint: disable=too-many-function-args + GLOBAL_VID = global_vid() # pylint: disable=too-many-function-args + STR_VIDS = [str(i) for i in _vids()] # pylint: disable=too-many-function-args NEW_VIDS = VIDS[1:] def netbase(self, vid, host): @@ -6067,7 +6072,7 @@ def run_ip(self, args): native_vlan: 99 tagged_vlans: [%s] hairpin_unicast: True -""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d', +""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d', # pylint: disable=too-many-function-args ','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS)) @@ -6076,8 +6081,8 @@ class FaucetTaggedScaleTest(FaucetTaggedTest): def _vids(): # pylint: disable=no-method-argument,no-self-use return list(range(100, 148)) - VIDS = _vids() - STR_VIDS = [str(i) for i in _vids()] + VIDS = _vids() # pylint: disable=too-many-function-args + STR_VIDS = [str(i) for i in _vids()] # pylint: disable=too-many-function-args NEW_VIDS = VIDS[1:] CONFIG_GLOBAL = """ @@ -7705,12 +7710,12 @@ class FaucetDestRewriteTest(FaucetUntaggedTest): def override_mac(): # pylint: disable=no-method-argument,no-self-use return '0e:00:00:00:00:02' - OVERRIDE_MAC = override_mac() + OVERRIDE_MAC = override_mac() # pylint: disable=too-many-function-args def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use return '0e:00:00:00:00:03' - REWRITE_MAC = rewrite_mac() + REWRITE_MAC = rewrite_mac() # pylint: disable=too-many-function-args CONFIG_GLOBAL = """ vlans: @@ -7729,7 +7734,7 @@ def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use - rule: actions: allow: 1 -""" % (override_mac(), rewrite_mac()) +""" % (override_mac(), rewrite_mac()) # pylint: disable=too-many-function-args CONFIG = """ interfaces: %(port_1)d: @@ -7794,12 +7799,12 @@ class 
FaucetDestRewriteOrderedTest(FaucetUntaggedTest): def override_mac(): # pylint: disable=no-method-argument,no-self-use return '0e:00:00:00:00:02' - OVERRIDE_MAC = override_mac() + OVERRIDE_MAC = override_mac() # pylint: disable=too-many-function-args def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use return '0e:00:00:00:00:03' - REWRITE_MAC = rewrite_mac() + REWRITE_MAC = rewrite_mac() # pylint: disable=too-many-function-args CONFIG_GLOBAL = """ vlans: @@ -7818,7 +7823,7 @@ def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use - rule: actions: allow: 1 -""" % (override_mac(), rewrite_mac()) +""" % (override_mac(), rewrite_mac()) # pylint: disable=too-many-function-args CONFIG = """ interfaces: %(port_1)d: From 8b86f5d023f1c401033a8c62356ccaff19b73406 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 16 Jun 2022 02:44:19 +0000 Subject: [PATCH 153/231] No testit.sh --- testit.sh | 8 -------- 1 file changed, 8 deletions(-) delete mode 100755 testit.sh diff --git a/testit.sh b/testit.sh deleted file mode 100755 index 4852aff855..0000000000 --- a/testit.sh +++ /dev/null @@ -1,8 +0,0 @@ -sudo docker build --pull -t c65faucet/tests -f Dockerfile.tests . 
-sudo docker run --name=faucet-tests \ - --sysctl net.ipv6.conf.all.disable_ipv6=0 --privileged --cap-add=ALL --rm \ - -v /lib/modules:/lib/modules \ - -v /var/local/lib/docker:/var/lib/docker \ - -v /tmp/faucet-pip-cache:/var/tmp/pip-cache \ - -e FAUCET_TESTS="-un FaucetUntaggedTest" \ - -ti c65faucet/tests From abf91fec697881b2397cc9878ad437a2d88394a1 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 28 Aug 2022 21:05:38 +0000 Subject: [PATCH 154/231] os_ken 2.5.0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 623ecbd5b3..c313ed57d0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,6 +4,6 @@ networkx>=1.9 pbr>=1.9 prometheus_client==0.14.1 ruamel.yaml==0.17.21 -os_ken==2.4.0 +os_ken==2.5.0 c65beka==1.0.0 pytricia>=1.0.0 From 23fee675cf2cde6ce7ce3bf411e1210da85b2d02 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 28 Aug 2022 21:05:38 +0000 Subject: [PATCH 155/231] Use c65sdn os-ken. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 623ecbd5b3..1d293a5337 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,9 +1,9 @@ c65chewie==1.0.2 +git+https://github.com/c65sdn/os-ken@1.0.0 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.14.1 ruamel.yaml==0.17.21 -os_ken==2.4.0 c65beka==1.0.0 pytricia>=1.0.0 From 4f21165de0e79d43b26bd8c937ab13b724008a39 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Sep 2022 02:25:35 +0000 Subject: [PATCH 156/231] workaround for cannot install configs under pyproject. 
--- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index e2ca068682..7eff9acf03 100755 --- a/setup.py +++ b/setup.py @@ -79,6 +79,8 @@ def setup_faucet_log(): setup_ryu_conf() setup_faucet_conf() setup_faucet_log() + except FileNotFound as exception: + print(str(exception)) except OSError as exception: if exception.errno == errno.EACCES: print(f"Permission denied creating {exception.filename}, skipping copying configs") From dd67c3b1241d91a157422a4a4e8844cbf312ad91 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Sep 2022 02:27:31 +0000 Subject: [PATCH 157/231] handle logdir exception separately. --- setup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7eff9acf03..07d55eebd9 100755 --- a/setup.py +++ b/setup.py @@ -78,7 +78,6 @@ def setup_faucet_log(): try: setup_ryu_conf() setup_faucet_conf() - setup_faucet_log() except FileNotFound as exception: print(str(exception)) except OSError as exception: @@ -87,6 +86,11 @@ def setup_faucet_log(): else: raise + try: + setup_faucet_log() + except OSError as exception: + print(str(exception)) + setup( name='faucet', From c120b4d54717f302317bad12d6a039bb94b8d909 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 7 Sep 2022 02:30:23 +0000 Subject: [PATCH 158/231] not found. 
--- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 07d55eebd9..2caf9205cb 100755 --- a/setup.py +++ b/setup.py @@ -78,7 +78,7 @@ def setup_faucet_log(): try: setup_ryu_conf() setup_faucet_conf() - except FileNotFound as exception: + except FileNotFoundError as exception: print(str(exception)) except OSError as exception: if exception.errno == errno.EACCES: From b43b3c24e9b9d786b54c6525d9a1e6c4ae45ad95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Oct 2022 21:02:58 +0000 Subject: [PATCH 159/231] Bump tinyrpc from 1.1.2 to 1.1.5 Bumps [tinyrpc](https://github.com/mbr/tinyrpc) from 1.1.2 to 1.1.5. - [Release notes](https://github.com/mbr/tinyrpc/releases) - [Commits](https://github.com/mbr/tinyrpc/commits) --- updated-dependencies: - dependency-name: tinyrpc dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index a6c8730269..776dbc7f5e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -12,4 +12,4 @@ requests requirements-parser scapy==2.4.4 webob==1.8.7 -tinyrpc==1.1.2 +tinyrpc==1.1.5 From 22c3bc1b1b1c4a1bd37a40e29957764e5b15d36c Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 13 Oct 2022 19:31:56 +0000 Subject: [PATCH 160/231] prometheus 0.15.0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1d293a5337..cfeb1df7e0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ git+https://github.com/c65sdn/os-ken@1.0.0 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 -prometheus_client==0.14.1 +prometheus_client==0.15.0 ruamel.yaml==0.17.21 c65beka==1.0.0 pytricia>=1.0.0 From 5f7d90dcb7ad3837dee517f512afe6ac98df8d10 Mon Sep 17 00:00:00 2001 From: Josh 
Bailey Date: Thu, 13 Oct 2022 23:07:50 +0000 Subject: [PATCH 161/231] Don't need to rm ofconfig. --- docker/install-faucet.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/docker/install-faucet.sh b/docker/install-faucet.sh index dc9b04a776..3900fd808a 100755 --- a/docker/install-faucet.sh +++ b/docker/install-faucet.sh @@ -36,8 +36,6 @@ done rm -r "${HOME}/.cache" rm -r "${FROOT}" rm -r /usr/local/lib/python3*/site-packages/os_ken/tests/ -rm -r /usr/local/lib/python3*/site-packages/os_ken/lib/of_config/ -rm /usr/local/lib/python3*/site-packages/os_ken/cmd/of_config_cli.py # Smoke test faucet -V || exit 1 From 1e4b2bbd48c5549a07783d6dc8ee5d9431911b24 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Oct 2022 03:42:18 +0000 Subject: [PATCH 162/231] Bump c65chewie from 1.0.2 to 1.0.3 Bumps [c65chewie](https://github.com/faucetsdn/chewie) from 1.0.2 to 1.0.3. - [Release notes](https://github.com/faucetsdn/chewie/releases) - [Commits](https://github.com/faucetsdn/chewie/commits) --- updated-dependencies: - dependency-name: c65chewie dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cfeb1df7e0..909d780303 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -c65chewie==1.0.2 +c65chewie==1.0.3 git+https://github.com/c65sdn/os-ken@1.0.0 influxdb>=2.12.0 networkx>=1.9 From 42be7590f7f194af5870db1ad1c0dfceaff714f7 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 2 Nov 2022 11:39:04 +1300 Subject: [PATCH 163/231] drop 3.7. 
--- .github/workflows/tests-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index da4d29093e..2a18881530 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7, 3.8, 3.9, '3.10', 3.11] + python-version: [3.8, 3.9, '3.10', 3.11] steps: - name: Checkout repo uses: actions/checkout@v3 From d7918d97f10184e9c6dc7eb782bdd593119b304f Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 2 Nov 2022 11:55:02 +1300 Subject: [PATCH 164/231] f string. --- clib/tcpdump_helper.py | 1 + 1 file changed, 1 insertion(+) diff --git a/clib/tcpdump_helper.py b/clib/tcpdump_helper.py index 2831ae8e6d..cfed937139 100644 --- a/clib/tcpdump_helper.py +++ b/clib/tcpdump_helper.py @@ -31,6 +31,7 @@ def __init__(self, tcpdump_host, tcpdump_filter, funcs=None, self.intf_name = self.intf_name.split('.')[0] tcpdump_flags = vflags + # pylint: disable=consider-using-f-string tcpdump_flags += ' -Z root' tcpdump_flags += ' -c %u' % packets if packets else '' tcpdump_flags += ' -w %s' % pcap_out if pcap_out else '' From c761f42869a031c2cdfb0d2838025c61244ed6ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 2 Nov 2022 03:47:39 +0000 Subject: [PATCH 165/231] Bump sphinx-rtd-theme from 1.0.0 to 1.1.0 Bumps [sphinx-rtd-theme](https://github.com/readthedocs/sphinx_rtd_theme) from 1.0.0 to 1.1.0. - [Release notes](https://github.com/readthedocs/sphinx_rtd_theme/releases) - [Changelog](https://github.com/readthedocs/sphinx_rtd_theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/readthedocs/sphinx_rtd_theme/compare/1.0.0...1.1.0) --- updated-dependencies: - dependency-name: sphinx-rtd-theme dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 930a23d2c7..2c6b635a3c 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt sphinx==5.3.0 -sphinx_rtd_theme==1.0.0 +sphinx_rtd_theme==1.1.0 sphinxcontrib-svg2pdfconverter==1.2.1 From 96cc5c35cceed250d13b3a2a5b38f8ae86bc36c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Nov 2022 03:18:40 +0000 Subject: [PATCH 166/231] Bump psutil from 5.9.3 to 5.9.4 Bumps [psutil](https://github.com/giampaolo/psutil) from 5.9.3 to 5.9.4. - [Release notes](https://github.com/giampaolo/psutil/releases) - [Changelog](https://github.com/giampaolo/psutil/blob/master/HISTORY.rst) - [Commits](https://github.com/giampaolo/psutil/compare/release-5.9.3...release-5.9.4) --- updated-dependencies: - dependency-name: psutil dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 5236b625d0..20fb42a672 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -7,7 +7,7 @@ exabgp==4.2.21 importlab>=0.3.1 netifaces packaging -psutil==5.9.3 +psutil==5.9.4 requests requirements-parser scapy==2.4.4 From f874abebfd9a31adab38624d6f569a87255b6e15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Nov 2022 03:18:56 +0000 Subject: [PATCH 167/231] Bump sphinx-rtd-theme from 1.1.0 to 1.1.1 Bumps [sphinx-rtd-theme](https://github.com/readthedocs/sphinx_rtd_theme) from 1.1.0 to 1.1.1. 
- [Release notes](https://github.com/readthedocs/sphinx_rtd_theme/releases) - [Changelog](https://github.com/readthedocs/sphinx_rtd_theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/readthedocs/sphinx_rtd_theme/compare/1.1.0...1.1.1) --- updated-dependencies: - dependency-name: sphinx-rtd-theme dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 2c6b635a3c..d346dd7918 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt sphinx==5.3.0 -sphinx_rtd_theme==1.1.0 +sphinx_rtd_theme==1.1.1 sphinxcontrib-svg2pdfconverter==1.2.1 From da4444268462a03c7869564d3bffb6e02209f37e Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 13 Nov 2022 21:12:47 +0000 Subject: [PATCH 168/231] pylint. --- setup.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 1525492e6c..7c65c59d2c 100755 --- a/setup.py +++ b/setup.py @@ -82,11 +82,9 @@ def setup_faucet_log(): print(str(exception)) except OSError as exception: if exception.errno == errno.EACCES: - print("Permission denied creating %s, skipping copying configs" - % exception.filename) + print(f"Permission denied creating {exception.filename}, skipping copying configs") elif exception.errno == errno.ENOENT: - print("File not found creating %s, skipping copying configs" - % exception.filename) + print(f"File not found creating {exception.filename}, skipping copying configs") else: raise From 2151e6a6dfb068c7608830fe5efb699e98c2b0d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Dec 2022 03:18:42 +0000 Subject: [PATCH 169/231] Bump pylint from 2.15.8 to 2.15.9 Bumps [pylint](https://github.com/PyCQA/pylint) from 2.15.8 to 2.15.9. 
- [Release notes](https://github.com/PyCQA/pylint/releases) - [Commits](https://github.com/PyCQA/pylint/compare/v2.15.8...v2.15.9) --- updated-dependencies: - dependency-name: pylint dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index e3ef31b801..ff01000bef 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,3 +1,3 @@ flake8==5.0.4 -pylint==2.15.8 +pylint==2.15.9 pytype==2022.10.26 From b31dc46cb0b042bd3499e79423218936c5a4ac3f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 7 Jan 2023 07:23:27 +0000 Subject: [PATCH 170/231] Bump flake8 from 5.0.4 to 6.0.0 Bumps [flake8](https://github.com/pycqa/flake8) from 5.0.4 to 6.0.0. - [Release notes](https://github.com/pycqa/flake8/releases) - [Commits](https://github.com/pycqa/flake8/compare/5.0.4...6.0.0) --- updated-dependencies: - dependency-name: flake8 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index ff01000bef..a058f96476 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,3 +1,3 @@ -flake8==5.0.4 +flake8==6.0.0 pylint==2.15.9 pytype==2022.10.26 From 4f7a60d035c1f0b63ebb74cfe668b41114a26fa8 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 20 Jan 2023 19:00:37 +1300 Subject: [PATCH 171/231] downgrade os_ken. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7faf4ba854..cdb15a058d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,5 +9,5 @@ c65beka==1.0.0 # dnspython 2.3.0 is not compatible with eventlet # https://github.com/eventlet/eventlet/issues/781 dnspython<2.3.0 -os_ken==2.6.0 +os_ken==2.3.1 pytricia>=1.0.0 From 14a9bbdf3aceddc99972db2893f8a8e786bd3011 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 20 Jan 2023 19:03:16 +1300 Subject: [PATCH 172/231] os-ken. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cdb15a058d..0025ee7cbc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ c65chewie==1.0.3 -git+https://github.com/c65sdn/os-ken@1.0.0 +# git+https://github.com/c65sdn/os-ken@1.0.0 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 From 5c7c23ccc59207dca6f6f423330e95541ecda279 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jan 2023 03:39:59 +0000 Subject: [PATCH 173/231] Bump os-ken from 2.3.1 to 2.6.0 Bumps [os-ken](http://www.openstack.org/) from 2.3.1 to 2.6.0. --- updated-dependencies: - dependency-name: os-ken dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0025ee7cbc..b9ce13a973 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,5 +9,5 @@ c65beka==1.0.0 # dnspython 2.3.0 is not compatible with eventlet # https://github.com/eventlet/eventlet/issues/781 dnspython<2.3.0 -os_ken==2.3.1 +os_ken==2.6.0 pytricia>=1.0.0 From 38fbe084e785aa098ce6de7c4de037b18cd1786b Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 25 Jan 2023 12:40:25 +0000 Subject: [PATCH 174/231] prom client 0.16. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0025ee7cbc..e17699151c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ c65chewie==1.0.3 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 -prometheus_client==0.15.0 +prometheus_client==0.16.0 ruamel.yaml==0.17.21 c65beka==1.0.0 # dnspython 2.3.0 is not compatible with eventlet From ebed970e44746179f6d81a8debc376b5ebe53916 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 22 Feb 2023 01:06:21 +0000 Subject: [PATCH 175/231] pylint. --- clib/ofctl_rest/ofctl_rest.py | 763 ---------------------------------- clib/ofctl_rest/wsgi.py | 312 -------------- ofctl_rest/ofctl_rest.py | 90 ++-- ofctl_rest/wsgi.py | 41 +- tests/codecheck/src_files.sh | 2 +- 5 files changed, 68 insertions(+), 1140 deletions(-) delete mode 100644 clib/ofctl_rest/ofctl_rest.py delete mode 100644 clib/ofctl_rest/wsgi.py diff --git a/clib/ofctl_rest/ofctl_rest.py b/clib/ofctl_rest/ofctl_rest.py deleted file mode 100644 index b2b136b8f2..0000000000 --- a/clib/ofctl_rest/ofctl_rest.py +++ /dev/null @@ -1,763 +0,0 @@ -# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import logging -import json -import ast - -from os_ken.base import app_manager -from os_ken.controller import ofp_event -from os_ken.controller import dpset -from os_ken.controller.handler import MAIN_DISPATCHER -from os_ken.controller.handler import set_ev_cls -from os_ken.exception import OSKenException -from os_ken.ofproto import ofproto_v1_3 -from os_ken.lib import ofctl_v1_3 -from os_ken.lib import hub -from wsgi import ControllerBase -from wsgi import Response -from wsgi import WSGIApplication, WSGIServer - -LOG = logging.getLogger('os_ken.app.ofctl_rest') - -DEFAULT_WSGI_HOST = '0.0.0.0' -DEFAULT_WSGI_PORT = 8080 - -OFCTL_HOST = os.getenv('OFCTL_HOST', '0.0.0.0') -OFCTL_PORT = int(os.getenv('OFCTL_PORT', '8080')) - -# supported ofctl versions in this restful app -supported_ofctl = { - ofproto_v1_3.OFP_VERSION: ofctl_v1_3, -} - -# pylint: disable=missing-function-docstring,disable=invalid-name,disable=missing-class-docstring,disable=too-few-public-methods,disable=unused-argument,disable=no-member - -# REST API -# - -# Retrieve the switch stats -# -# get the list of all switches -# GET /stats/switches -# -# get the desc stats of the switch -# GET /stats/desc/ -# -# get flows desc stats of the switch -# GET /stats/flowdesc/ -# -# get flows desc stats of the switch filtered by the fields -# POST /stats/flowdesc/ -# -# get flows stats of the switch -# GET /stats/flow/ -# -# get flows stats of the switch filtered by the fields -# POST /stats/flow/ -# -# get aggregate flows stats of the switch -# GET /stats/aggregateflow/ -# -# get aggregate flows stats of the switch filtered by the fields -# POST /stats/aggregateflow/ -# -# get table stats of the switch -# GET /stats/table/ -# -# get table features stats of the switch -# GET /stats/tablefeatures/ -# -# get ports stats of the switch -# GET /stats/port/[/] -# Note: Specification of port number is optional -# -# get queues stats of the switch -# GET /stats/queue/[/[/]] -# Note: Specification of port 
number and queue id are optional -# If you want to omitting the port number and setting the queue id, -# please specify the keyword "ALL" to the port number -# e.g. GET /stats/queue/1/ALL/1 -# -# get queues config stats of the switch -# GET /stats/queueconfig/[/] -# Note: Specification of port number is optional -# -# get queues desc stats of the switch -# GET /stats/queuedesc/[/[/]] -# Note: Specification of port number and queue id are optional -# If you want to omitting the port number and setting the queue id, -# please specify the keyword "ALL" to the port number -# e.g. GET /stats/queuedesc/1/ALL/1 -# -# get meter features stats of the switch -# GET /stats/meterfeatures/ -# -# get meter config stats of the switch -# GET /stats/meterconfig/[/] -# Note: Specification of meter id is optional -# -# get meter desc stats of the switch -# GET /stats/meterdesc/[/] -# Note: Specification of meter id is optional -# -# get meters stats of the switch -# GET /stats/meter/[/] -# Note: Specification of meter id is optional -# -# get group features stats of the switch -# GET /stats/groupfeatures/ -# -# get groups desc stats of the switch -# GET /stats/groupdesc/[/] -# Note: Specification of group id is optional (OpenFlow 1.5 or later) -# -# get groups stats of the switch -# GET /stats/group/[/] -# Note: Specification of group id is optional -# -# get ports description of the switch -# GET /stats/portdesc/[/] -# Note: Specification of port number is optional (OpenFlow 1.5 or later) - -# Update the switch stats -# -# add a flow entry -# POST /stats/flowentry/add -# -# modify all matching flow entries -# POST /stats/flowentry/modify -# -# modify flow entry strictly matching wildcards and priority -# POST /stats/flowentry/modify_strict -# -# delete all matching flow entries -# POST /stats/flowentry/delete -# -# delete flow entry strictly matching wildcards and priority -# POST /stats/flowentry/delete_strict -# -# delete all flow entries of the switch -# DELETE 
/stats/flowentry/clear/ -# -# add a meter entry -# POST /stats/meterentry/add -# -# modify a meter entry -# POST /stats/meterentry/modify -# -# delete a meter entry -# POST /stats/meterentry/delete -# -# add a group entry -# POST /stats/groupentry/add -# -# modify a group entry -# POST /stats/groupentry/modify -# -# delete a group entry -# POST /stats/groupentry/delete -# -# modify behavior of the physical port -# POST /stats/portdesc/modify -# -# modify role of controller -# POST /stats/role -# -# -# send a experimeter message -# POST /stats/experimenter/ - - -class CommandNotFoundError(OSKenException): - message = 'No such command : %(cmd)s' - - -class PortNotFoundError(OSKenException): - message = 'No such port info: %(port_no)s' - - -def stats_method(method): - def wrapper(self, req, dpid, *args, **kwargs): - # Get datapath instance from DPSet - try: - dp = self.dpset.get(int(str(dpid), 0)) - except ValueError: - LOG.exception('Invalid dpid: %s', dpid) - return Response(status=400) - if dp is None: - LOG.error('No such Datapath: %s', dpid) - return Response(status=404) - - # Get lib/ofctl_* module - try: - ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION) - except KeyError: - LOG.exception('Unsupported OF version: %s', - dp.ofproto.OFP_VERSION) - return Response(status=501) - - # Invoke StatsController method - try: - ret = method(self, req, dp, ofctl, *args, **kwargs) - return Response(content_type='application/json', - body=json.dumps(ret)) - except ValueError: - LOG.exception('Invalid syntax: %s', req.body) - return Response(status=400) - except AttributeError: - LOG.exception('Unsupported OF request in this version: %s', - dp.ofproto.OFP_VERSION) - return Response(status=501) - - return wrapper - - -def command_method(method): - def wrapper(self, req, *args, **kwargs): - # Parse request json body - try: - if req.body: - # We use ast.literal_eval() to parse request json body - # instead of json.loads(). 
- # Because we need to parse binary format body - # in send_experimenter(). - body = ast.literal_eval(req.body.decode('utf-8')) - else: - body = {} - except SyntaxError: - LOG.exception('Invalid syntax: %s', req.body) - return Response(status=400) - - # Get datapath_id from request parameters - dpid = body.get('dpid', None) - if not dpid: - try: - dpid = kwargs.pop('dpid') - except KeyError: - LOG.exception('Cannot get dpid from request parameters') - return Response(status=400) - - # Get datapath instance from DPSet - try: - dp = self.dpset.get(int(str(dpid), 0)) - except ValueError: - LOG.exception('Invalid dpid: %s', dpid) - return Response(status=400) - if dp is None: - LOG.error('No such Datapath: %s', dpid) - return Response(status=404) - - # Get lib/ofctl_* module - try: - ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION) - except KeyError: - LOG.exception('Unsupported OF version: version=%s', - dp.ofproto.OFP_VERSION) - return Response(status=501) - - # Invoke StatsController method - try: - method(self, req, dp, ofctl, body, *args, **kwargs) - return Response(status=200) - except ValueError: - LOG.exception('Invalid syntax: %s', req.body) - return Response(status=400) - except AttributeError: - LOG.exception('Unsupported OF request in this version: %s', - dp.ofproto.OFP_VERSION) - return Response(status=501) - except CommandNotFoundError as e: - LOG.exception(e.message) - return Response(status=404) - except PortNotFoundError as e: - LOG.exception(e.message) - return Response(status=404) - - return wrapper - - -class StatsController(ControllerBase): - # pytype: disable=attribute-error - - def __init__(self, req, link, data, **config): - super().__init__(req, link, data, **config) - self.dpset = data['dpset'] - self.waiters = data['waiters'] - - def get_dpids(self, req, **_kwargs): - dps = list(self.dpset.dps.keys()) - body = json.dumps(dps) - return Response(content_type='application/json', body=body) - - @stats_method - def get_desc_stats(self, req, dp, 
ofctl, **kwargs): - return ofctl.get_desc_stats(dp, self.waiters) - - @stats_method - def get_flow_desc(self, req, dp, ofctl, **kwargs): - flow = req.json if req.body else {} - return ofctl.get_flow_desc(dp, self.waiters, flow) - - @stats_method - def get_flow_stats(self, req, dp, ofctl, **kwargs): - flow = req.json if req.body else {} - return ofctl.get_flow_stats(dp, self.waiters, flow) - - @stats_method - def get_aggregate_flow_stats(self, req, dp, ofctl, **kwargs): - flow = req.json if req.body else {} - return ofctl.get_aggregate_flow_stats(dp, self.waiters, flow) - - @stats_method - def get_table_stats(self, req, dp, ofctl, **kwargs): - return ofctl.get_table_stats(dp, self.waiters) - - @stats_method - def get_table_features(self, req, dp, ofctl, **kwargs): - return ofctl.get_table_features(dp, self.waiters) - - @stats_method - def get_port_stats(self, req, dp, ofctl, port=None, **kwargs): - if port == "ALL": - port = None - - return ofctl.get_port_stats(dp, self.waiters, port) - - @stats_method - def get_queue_stats(self, req, dp, ofctl, - port=None, queue_id=None, **kwargs): - if port == "ALL": - port = None - - if queue_id == "ALL": - queue_id = None - - return ofctl.get_queue_stats(dp, self.waiters, port, queue_id) - - @stats_method - def get_queue_config(self, req, dp, ofctl, port=None, **kwargs): - if port == "ALL": - port = None - - return ofctl.get_queue_config(dp, self.waiters, port) - - @stats_method - def get_queue_desc(self, req, dp, ofctl, - port=None, queue=None, **_kwargs): - if port == "ALL": - port = None - - if queue == "ALL": - queue = None - - return ofctl.get_queue_desc(dp, self.waiters, port, queue) - - @stats_method - def get_meter_features(self, req, dp, ofctl, **kwargs): - return ofctl.get_meter_features(dp, self.waiters) - - @stats_method - def get_meter_config(self, req, dp, ofctl, meter_id=None, **kwargs): - if meter_id == "ALL": - meter_id = None - - return ofctl.get_meter_config(dp, self.waiters, meter_id) - - @stats_method - def 
get_meter_desc(self, req, dp, ofctl, meter_id=None, **kwargs): - if meter_id == "ALL": - meter_id = None - - return ofctl.get_meter_desc(dp, self.waiters, meter_id) - - @stats_method - def get_meter_stats(self, req, dp, ofctl, meter_id=None, **kwargs): - if meter_id == "ALL": - meter_id = None - - return ofctl.get_meter_stats(dp, self.waiters, meter_id) - - @stats_method - def get_group_features(self, req, dp, ofctl, **kwargs): - return ofctl.get_group_features(dp, self.waiters) - - @stats_method - def get_group_desc(self, req, dp, ofctl, group_id=None, **kwargs): - return ofctl.get_group_desc(dp, self.waiters) - - @stats_method - def get_group_stats(self, req, dp, ofctl, group_id=None, **kwargs): - if group_id == "ALL": - group_id = None - - return ofctl.get_group_stats(dp, self.waiters, group_id) - - @stats_method - def get_port_desc(self, req, dp, ofctl, port_no=None, **kwargs): - return ofctl.get_port_desc(dp, self.waiters) - - @stats_method - def get_role(self, req, dp, ofctl, **kwargs): - return ofctl.get_role(dp, self.waiters) - - @staticmethod - @command_method - def mod_flow_entry(req, dp, ofctl, flow, cmd, **kwargs): - cmd_convert = { - 'add': dp.ofproto.OFPFC_ADD, - 'modify': dp.ofproto.OFPFC_MODIFY, - 'modify_strict': dp.ofproto.OFPFC_MODIFY_STRICT, - 'delete': dp.ofproto.OFPFC_DELETE, - 'delete_strict': dp.ofproto.OFPFC_DELETE_STRICT, - } - mod_cmd = cmd_convert.get(cmd, None) - if mod_cmd is None: - raise CommandNotFoundError(cmd=cmd) - - ofctl.mod_flow_entry(dp, flow, mod_cmd) - - @staticmethod - @command_method - def delete_flow_entry(req, dp, ofctl, flow, **kwargs): - flow = {'table_id': dp.ofproto.OFPTT_ALL} - ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE) - - @staticmethod - @command_method - def mod_meter_entry(req, dp, ofctl, meter, cmd, **kwargs): - cmd_convert = { - 'add': dp.ofproto.OFPMC_ADD, - 'modify': dp.ofproto.OFPMC_MODIFY, - 'delete': dp.ofproto.OFPMC_DELETE, - } - mod_cmd = cmd_convert.get(cmd, None) - if mod_cmd is None: - 
raise CommandNotFoundError(cmd=cmd) - - ofctl.mod_meter_entry(dp, meter, mod_cmd) - - @staticmethod - @command_method - def mod_group_entry(req, dp, ofctl, group, cmd, **kwargs): - cmd_convert = { - 'add': dp.ofproto.OFPGC_ADD, - 'modify': dp.ofproto.OFPGC_MODIFY, - 'delete': dp.ofproto.OFPGC_DELETE, - } - mod_cmd = cmd_convert.get(cmd, None) - if mod_cmd is None: - raise CommandNotFoundError(cmd=cmd) - - ofctl.mod_group_entry(dp, group, mod_cmd) - - @command_method - def mod_port_behavior(self, req, dp, ofctl, port_config, cmd, **kwargs): - port_no = port_config.get('port_no', None) - port_no = int(str(port_no), 0) - - port_info = self.dpset.port_state[int(dp.id)].get(port_no) - if port_info: - port_config.setdefault('hw_addr', port_info.hw_addr) - port_config.setdefault('advertise', port_info.advertised) - else: - raise PortNotFoundError(port_no=port_no) - - if cmd != 'modify': - raise CommandNotFoundError(cmd=cmd) - - ofctl.mod_port_behavior(dp, port_config) - - @staticmethod - @command_method - def send_experimenter(req, dp, ofctl, exp, **kwargs): - ofctl.send_experimenter(dp, exp) - - @staticmethod - @command_method - def set_role(req, dp, ofctl, role, **kwargs): - ofctl.set_role(dp, role) - - -class RestStatsApi(app_manager.OSKenApp): - OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] - _CONTEXTS = { - 'dpset': dpset.DPSet, - 'wsgi': WSGIApplication - } - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.dpset = kwargs['dpset'] - wsgi = kwargs['wsgi'] - self.waiters = {} - self.data = {} - self.data['dpset'] = self.dpset - self.data['waiters'] = self.waiters - mapper = wsgi.mapper - - wsgi.registory['StatsController'] = self.data - path = '/stats' - uri = path + '/switches' - mapper.connect('stats', uri, - controller=StatsController, action='get_dpids', - conditions=dict(method=['GET'])) - - uri = path + '/desc/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_desc_stats', - 
conditions=dict(method=['GET'])) - - uri = path + '/flowdesc/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_flow_stats', - conditions=dict(method=['GET', 'POST'])) - - uri = path + '/flow/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_flow_stats', - conditions=dict(method=['GET', 'POST'])) - - uri = path + '/aggregateflow/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, - action='get_aggregate_flow_stats', - conditions=dict(method=['GET', 'POST'])) - - uri = path + '/table/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_table_stats', - conditions=dict(method=['GET'])) - - uri = path + '/tablefeatures/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_table_features', - conditions=dict(method=['GET'])) - - uri = path + '/port/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_port_stats', - conditions=dict(method=['GET'])) - - uri = path + '/port/{dpid}/{port}' - mapper.connect('stats', uri, - controller=StatsController, action='get_port_stats', - conditions=dict(method=['GET'])) - - uri = path + '/queue/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_stats', - conditions=dict(method=['GET'])) - - uri = path + '/queue/{dpid}/{port}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_stats', - conditions=dict(method=['GET'])) - - uri = path + '/queue/{dpid}/{port}/{queue_id}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_stats', - conditions=dict(method=['GET'])) - uri = path + '/queueconfig/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_config', - conditions=dict(method=['GET'])) - - uri = path + '/queueconfig/{dpid}/{port}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_config', - conditions=dict(method=['GET'])) - - 
uri = path + '/queuedesc/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_desc', - conditions=dict(method=['GET'])) - - uri = path + '/queuedesc/{dpid}/{port}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_desc', - conditions=dict(method=['GET'])) - - uri = path + '/queuedesc/{dpid}/{port}/{queue}' - mapper.connect('stats', uri, - controller=StatsController, action='get_queue_desc', - conditions=dict(method=['GET'])) - - uri = path + '/meterfeatures/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_meter_features', - conditions=dict(method=['GET'])) - - uri = path + '/meterconfig/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_meter_config', - conditions=dict(method=['GET'])) - - uri = path + '/meterconfig/{dpid}/{meter_id}' - mapper.connect('stats', uri, - controller=StatsController, action='get_meter_config', - conditions=dict(method=['GET'])) - - uri = path + '/meterdesc/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_meter_desc', - conditions=dict(method=['GET'])) - - uri = path + '/meterdesc/{dpid}/{meter_id}' - mapper.connect('stats', uri, - controller=StatsController, action='get_meter_desc', - conditions=dict(method=['GET'])) - - uri = path + '/meter/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_meter_stats', - conditions=dict(method=['GET'])) - - uri = path + '/meter/{dpid}/{meter_id}' - mapper.connect('stats', uri, - controller=StatsController, action='get_meter_stats', - conditions=dict(method=['GET'])) - - uri = path + '/groupfeatures/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_group_features', - conditions=dict(method=['GET'])) - - uri = path + '/groupdesc/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_group_desc', - conditions=dict(method=['GET'])) - - uri = path + 
'/groupdesc/{dpid}/{group_id}' - mapper.connect('stats', uri, - controller=StatsController, action='get_group_desc', - conditions=dict(method=['GET'])) - - uri = path + '/group/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_group_stats', - conditions=dict(method=['GET'])) - - uri = path + '/group/{dpid}/{group_id}' - mapper.connect('stats', uri, - controller=StatsController, action='get_group_stats', - conditions=dict(method=['GET'])) - - uri = path + '/portdesc/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_port_desc', - conditions=dict(method=['GET'])) - - uri = path + '/portdesc/{dpid}/{port_no}' - mapper.connect('stats', uri, - controller=StatsController, action='get_port_desc', - conditions=dict(method=['GET'])) - - uri = path + '/role/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='get_role', - conditions=dict(method=['GET'])) - - uri = path + '/flowentry/{cmd}' - mapper.connect('stats', uri, - controller=StatsController, action='mod_flow_entry', - conditions=dict(method=['POST'])) - - uri = path + '/flowentry/clear/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='delete_flow_entry', - conditions=dict(method=['DELETE'])) - - uri = path + '/meterentry/{cmd}' - mapper.connect('stats', uri, - controller=StatsController, action='mod_meter_entry', - conditions=dict(method=['POST'])) - - uri = path + '/groupentry/{cmd}' - mapper.connect('stats', uri, - controller=StatsController, action='mod_group_entry', - conditions=dict(method=['POST'])) - - uri = path + '/portdesc/{cmd}' - mapper.connect('stats', uri, - controller=StatsController, action='mod_port_behavior', - conditions=dict(method=['POST'])) - - uri = path + '/experimenter/{dpid}' - mapper.connect('stats', uri, - controller=StatsController, action='send_experimenter', - conditions=dict(method=['POST'])) - - uri = path + '/role' - mapper.connect('stats', uri, - 
controller=StatsController, action='set_role', - conditions=dict(method=['POST'])) - - self.server = WSGIServer(wsgi, OFCTL_HOST, OFCTL_PORT) - self.server_thread = hub.spawn(self.server.serve_forever) - - @set_ev_cls([ofp_event.EventOFPStatsReply, - ofp_event.EventOFPDescStatsReply, - ofp_event.EventOFPFlowStatsReply, - ofp_event.EventOFPAggregateStatsReply, - ofp_event.EventOFPTableStatsReply, - ofp_event.EventOFPTableFeaturesStatsReply, - ofp_event.EventOFPPortStatsReply, - ofp_event.EventOFPQueueStatsReply, - ofp_event.EventOFPQueueDescStatsReply, - ofp_event.EventOFPMeterStatsReply, - ofp_event.EventOFPMeterFeaturesStatsReply, - ofp_event.EventOFPMeterConfigStatsReply, - ofp_event.EventOFPGroupStatsReply, - ofp_event.EventOFPGroupFeaturesStatsReply, - ofp_event.EventOFPGroupDescStatsReply, - ofp_event.EventOFPPortDescStatsReply - ], MAIN_DISPATCHER) - def stats_reply_handler(self, ev): - msg = ev.msg - dp = msg.datapath - - if dp.id not in self.waiters: - return - if msg.xid not in self.waiters[dp.id]: - return - lock, msgs = self.waiters[dp.id][msg.xid] - msgs.append(msg) - - flags = dp.ofproto.OFPMPF_REPLY_MORE - - if msg.flags & flags: - return - del self.waiters[dp.id][msg.xid] - lock.set() - - @set_ev_cls([ofp_event.EventOFPSwitchFeatures, - ofp_event.EventOFPQueueGetConfigReply, - ofp_event.EventOFPRoleReply, - ], MAIN_DISPATCHER) - def features_reply_handler(self, ev): - msg = ev.msg - dp = msg.datapath - - if dp.id not in self.waiters: - return - if msg.xid not in self.waiters[dp.id]: - return - lock, msgs = self.waiters[dp.id][msg.xid] - msgs.append(msg) - - del self.waiters[dp.id][msg.xid] - lock.set() diff --git a/clib/ofctl_rest/wsgi.py b/clib/ofctl_rest/wsgi.py deleted file mode 100644 index f385a7a2fd..0000000000 --- a/clib/ofctl_rest/wsgi.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation. 
-# Copyright (C) 2012 Isaku Yamahata -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -# implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from types import MethodType - -from routes import Mapper -from routes.util import URLGenerator -import six -from tinyrpc.server import RPCServer -from tinyrpc.dispatch import RPCDispatcher -from tinyrpc.protocols.jsonrpc import JSONRPCProtocol -from tinyrpc.transports import ServerTransport, ClientTransport -from tinyrpc.client import RPCClient -import webob.dec -import webob.exc -from webob.request import Request as webob_Request -from webob.response import Response as webob_Response -import eventlet.wsgi - -from os_ken.lib import hub - -# pylint: disable=missing-function-docstring,disable=invalid-name,disable=missing-class-docstring,disable=too-few-public-methods - - -def route(name, path, methods=None, requirements=None): - def _route(controller_method): - controller_method.routing_info = { - 'name': name, - 'path': path, - 'methods': methods, - 'requirements': requirements, - } - return controller_method - return _route - - -class Request(webob_Request): - """ - Wrapper class for webob.request.Request. - - The behavior of this class is the same as webob.request.Request - except for setting "charset" to "UTF-8" automatically. 
- """ - DEFAULT_CHARSET = "UTF-8" - - def __init__(self, environ, charset=DEFAULT_CHARSET, *args, **kwargs): - super().__init__( - environ, charset=charset, *args, **kwargs) - - -class Response(webob_Response): - """ - Wrapper class for webob.response.Response. - - The behavior of this class is the same as webob.response.Response - except for setting "charset" to "UTF-8" automatically. - """ - DEFAULT_CHARSET = "UTF-8" - - def __init__(self, charset=DEFAULT_CHARSET, *args, **kwargs): - super().__init__(charset=charset, *args, **kwargs) - - -class WebSocketRegistrationWrapper: - - def __init__(self, func, controller): - self._controller = controller - self._controller_method = MethodType(func, controller) - - def __call__(self, ws): - wsgi_application = self._controller.parent - ws_manager = wsgi_application.websocketmanager - ws_manager.add_connection(ws) - try: - self._controller_method(ws) # pylint: disable=not-callable - finally: - ws_manager.delete_connection(ws) - - -class _AlreadyHandledResponse(Response): - # XXX: Eventlet API should not be used directly. - # https://github.com/benoitc/gunicorn/pull/2581 - _ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None) - - def __call__(self, environ, start_response): - return self._ALREADY_HANDLED - -def websocket(name, path): - def _websocket(controller_func): - def __websocket(self, req, **_): - wrapper = WebSocketRegistrationWrapper(controller_func, self) - ws_wsgi = hub.WebSocketWSGI(wrapper) - ws_wsgi(req.environ, req.start_response) - # XXX: In order to prevent the writing to a already closed socket. 
- # This issue is caused by combined use: - # - webob.dec.wsgify() - # - eventlet.wsgi.HttpProtocol.handle_one_response() - return _AlreadyHandledResponse() - __websocket.routing_info = { - 'name': name, - 'path': path, - 'methods': None, - 'requirements': None, - } - return __websocket - return _websocket - - -class ControllerBase: - special_vars = ['action', 'controller'] - - def __init__(self, req, link, data, **config): - self.req = req - self.link = link - self.data = data - self.parent = None - for name, value in config.items(): - setattr(self, name, value) - - def __call__(self, req): - action = self.req.urlvars.get('action', 'index') - if hasattr(self, '__before__'): - self.__before__() - - kwargs = self.req.urlvars.copy() - for attr in self.special_vars: - if attr in kwargs: - del kwargs[attr] - - return getattr(self, action)(req, **kwargs) - - -class WebSocketDisconnectedError(Exception): - pass - - -class WebSocketServerTransport(ServerTransport): - def __init__(self, ws): - self.ws = ws - - def receive_message(self): - message = self.ws.wait() - if message is None: - raise WebSocketDisconnectedError() - context = None - return context, message - - def send_reply(self, context, reply): - self.ws.send(six.text_type(reply)) - - -class WebSocketRPCServer(RPCServer): - def __init__(self, ws, rpc_callback): - dispatcher = RPCDispatcher() - dispatcher.register_instance(rpc_callback) - super().__init__( - WebSocketServerTransport(ws), - JSONRPCProtocol(), - dispatcher, - ) - - def serve_forever(self): - try: - super().serve_forever() - except WebSocketDisconnectedError: - return - - def _spawn(self, func, *args, **kwargs): - hub.spawn(func, *args, **kwargs) - - -class WebSocketClientTransport(ClientTransport): - - def __init__(self, ws, queue): - self.ws = ws - self.queue = queue - - def send_message(self, message, expect_reply=True): - self.ws.send(six.text_type(message)) - - if expect_reply: - return self.queue.get() - return None - - -class 
WebSocketRPCClient(RPCClient): - - def __init__(self, ws): - self.ws = ws - self.queue = hub.Queue() - super().__init__( - JSONRPCProtocol(), - WebSocketClientTransport(ws, self.queue), - ) - - def serve_forever(self): - while True: - msg = self.ws.wait() - if msg is None: - break - self.queue.put(msg) - - -class wsgify_hack(webob.dec.wsgify): - def __call__(self, environ, start_response): - self.kwargs['start_response'] = start_response - return super().__call__(environ, start_response) - - -class WebSocketManager: - - def __init__(self): - self._connections = [] - - def add_connection(self, ws): - self._connections.append(ws) - - def delete_connection(self, ws): - self._connections.remove(ws) - - def broadcast(self, msg): - for connection in self._connections: - connection.send(msg) - - -class WSGIApplication: - def __init__(self, **config): - self.config = config - self.mapper = Mapper() - self.registory = {} - self._wsmanager = WebSocketManager() - super().__init__() - - def _match(self, req): - # Note: Invoke the new API, first. If the arguments unmatched, - # invoke the old API. 
- try: - return self.mapper.match(environ=req.environ) - except TypeError: - self.mapper.environ = req.environ - return self.mapper.match(req.path_info) - - @wsgify_hack - def __call__(self, req, start_response): - match = self._match(req) - - if not match: - return webob.exc.HTTPNotFound() - - req.start_response = start_response - req.urlvars = match - link = URLGenerator(self.mapper, req.environ) - - data = None - name = match['controller'].__name__ - if name in self.registory: - data = self.registory[name] - - controller = match['controller'](req, link, data, **self.config) - controller.parent = self - return controller(req) - - def register(self, controller, data=None): - def _target_filter(attr): - if not inspect.ismethod(attr) and not inspect.isfunction(attr): - return False - if not hasattr(attr, 'routing_info'): - return False - return True - methods = inspect.getmembers(controller, _target_filter) - for method_name, method in methods: - routing_info = getattr(method, 'routing_info') - name = routing_info['name'] - path = routing_info['path'] - conditions = {} - if routing_info.get('methods'): - conditions['method'] = routing_info['methods'] - requirements = routing_info.get('requirements') or {} - self.mapper.connect(name, - path, - controller=controller, - requirements=requirements, - action=method_name, - conditions=conditions) - if data: - self.registory[controller.__name__] = data - - @property - def websocketmanager(self): - return self._wsmanager - - -class WSGIServer(hub.WSGIServer): - def __init__(self, application, host, port, **config): - super().__init__((host, port), application, **config) - - def __call__(self): - self.serve_forever() diff --git a/ofctl_rest/ofctl_rest.py b/ofctl_rest/ofctl_rest.py index cae83a369e..3650c6a7de 100644 --- a/ofctl_rest/ofctl_rest.py +++ b/ofctl_rest/ofctl_rest.py @@ -44,6 +44,8 @@ ofproto_v1_3.OFP_VERSION: ofctl_v1_3, } +# pylint: 
disable=missing-function-docstring,disable=invalid-name,disable=missing-class-docstring,disable=too-few-public-methods,disable=unused-argument,disable=no-member + # REST API # @@ -289,8 +291,10 @@ def wrapper(self, req, *args, **kwargs): class StatsController(ControllerBase): + # pytype: disable=attribute-error + def __init__(self, req, link, data, **config): - super(StatsController, self).__init__(req, link, data, **config) + super().__init__(req, link, data, **config) self.dpset = data['dpset'] self.waiters = data['waiters'] @@ -410,8 +414,9 @@ def get_port_desc(self, req, dp, ofctl, port_no=None, **kwargs): def get_role(self, req, dp, ofctl, **kwargs): return ofctl.get_role(dp, self.waiters) + @staticmethod @command_method - def mod_flow_entry(self, req, dp, ofctl, flow, cmd, **kwargs): + def mod_flow_entry(req, dp, ofctl, flow, cmd, **kwargs): cmd_convert = { 'add': dp.ofproto.OFPFC_ADD, 'modify': dp.ofproto.OFPFC_MODIFY, @@ -425,13 +430,15 @@ def mod_flow_entry(self, req, dp, ofctl, flow, cmd, **kwargs): ofctl.mod_flow_entry(dp, flow, mod_cmd) + @staticmethod @command_method - def delete_flow_entry(self, req, dp, ofctl, flow, **kwargs): + def delete_flow_entry(req, dp, ofctl, flow, **kwargs): flow = {'table_id': dp.ofproto.OFPTT_ALL} ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE) + @staticmethod @command_method - def mod_meter_entry(self, req, dp, ofctl, meter, cmd, **kwargs): + def mod_meter_entry(req, dp, ofctl, meter, cmd, **kwargs): cmd_convert = { 'add': dp.ofproto.OFPMC_ADD, 'modify': dp.ofproto.OFPMC_MODIFY, @@ -443,8 +450,9 @@ def mod_meter_entry(self, req, dp, ofctl, meter, cmd, **kwargs): ofctl.mod_meter_entry(dp, meter, mod_cmd) + @staticmethod @command_method - def mod_group_entry(self, req, dp, ofctl, group, cmd, **kwargs): + def mod_group_entry(req, dp, ofctl, group, cmd, **kwargs): cmd_convert = { 'add': dp.ofproto.OFPGC_ADD, 'modify': dp.ofproto.OFPGC_MODIFY, @@ -473,12 +481,14 @@ def mod_port_behavior(self, req, dp, ofctl, 
port_config, cmd, **kwargs): ofctl.mod_port_behavior(dp, port_config) + @staticmethod @command_method - def send_experimenter(self, req, dp, ofctl, exp, **kwargs): + def send_experimenter(req, dp, ofctl, exp, **kwargs): ofctl.send_experimenter(dp, exp) + @staticmethod @command_method - def set_role(self, req, dp, ofctl, role, **kwargs): + def set_role(req, dp, ofctl, role, **kwargs): ofctl.set_role(dp, role) @@ -490,7 +500,7 @@ class RestStatsApi(app_manager.OSKenApp): } def __init__(self, *args, **kwargs): - super(RestStatsApi, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.dpset = kwargs['dpset'] wsgi = kwargs['wsgi'] self.waiters = {} @@ -504,162 +514,162 @@ def __init__(self, *args, **kwargs): uri = path + '/switches' mapper.connect('stats', uri, controller=StatsController, action='get_dpids', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/desc/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_desc_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/flowdesc/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_flow_stats', - conditions=dict(method=['GET', 'POST'])) + conditions={"method": ['GET', 'POST']}) uri = path + '/flow/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_flow_stats', - conditions=dict(method=['GET', 'POST'])) + conditions={"method": ['GET', 'POST']}) uri = path + '/aggregateflow/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_aggregate_flow_stats', - conditions=dict(method=['GET', 'POST'])) + conditions={"method": ['GET', 'POST']}) uri = path + '/table/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_table_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/tablefeatures/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_table_features', - 
conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/port/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_port_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/port/{dpid}/{port}' mapper.connect('stats', uri, controller=StatsController, action='get_port_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queue/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queue/{dpid}/{port}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queue/{dpid}/{port}/{queue_id}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queueconfig/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_config', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queueconfig/{dpid}/{port}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_config', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queuedesc/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queuedesc/{dpid}/{port}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/queuedesc/{dpid}/{port}/{queue}' mapper.connect('stats', uri, controller=StatsController, action='get_queue_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/meterfeatures/{dpid}' 
mapper.connect('stats', uri, controller=StatsController, action='get_meter_features', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/meterconfig/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_config', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/meterconfig/{dpid}/{meter_id}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_config', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/meterdesc/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/meterdesc/{dpid}/{meter_id}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/meter/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/meter/{dpid}/{meter_id}' mapper.connect('stats', uri, controller=StatsController, action='get_meter_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/groupfeatures/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_group_features', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/groupdesc/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_group_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/groupdesc/{dpid}/{group_id}' mapper.connect('stats', uri, controller=StatsController, action='get_group_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/group/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_group_stats', - 
conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/group/{dpid}/{group_id}' mapper.connect('stats', uri, controller=StatsController, action='get_group_stats', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/portdesc/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_port_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/portdesc/{dpid}/{port_no}' mapper.connect('stats', uri, controller=StatsController, action='get_port_desc', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/role/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='get_role', - conditions=dict(method=['GET'])) + conditions={"method": ['GET']}) uri = path + '/flowentry/{cmd}' mapper.connect('stats', uri, diff --git a/ofctl_rest/wsgi.py b/ofctl_rest/wsgi.py index b0fbd668fe..f385a7a2fd 100644 --- a/ofctl_rest/wsgi.py +++ b/ofctl_rest/wsgi.py @@ -29,11 +29,11 @@ import webob.exc from webob.request import Request as webob_Request from webob.response import Response as webob_Response +import eventlet.wsgi from os_ken.lib import hub -HEX_PATTERN = r'0x[0-9a-z]+' -DIGIT_PATTERN = r'[1-9][0-9]*' +# pylint: disable=missing-function-docstring,disable=invalid-name,disable=missing-class-docstring,disable=too-few-public-methods def route(name, path, methods=None, requirements=None): @@ -58,7 +58,7 @@ class Request(webob_Request): DEFAULT_CHARSET = "UTF-8" def __init__(self, environ, charset=DEFAULT_CHARSET, *args, **kwargs): - super(Request, self).__init__( + super().__init__( environ, charset=charset, *args, **kwargs) @@ -72,10 +72,10 @@ class Response(webob_Response): DEFAULT_CHARSET = "UTF-8" def __init__(self, charset=DEFAULT_CHARSET, *args, **kwargs): - super(Response, self).__init__(charset=charset, *args, **kwargs) + super().__init__(charset=charset, *args, **kwargs) -class WebSocketRegistrationWrapper(object): +class 
WebSocketRegistrationWrapper: def __init__(self, func, controller): self._controller = controller @@ -86,7 +86,7 @@ def __call__(self, ws): ws_manager = wsgi_application.websocketmanager ws_manager.add_connection(ws) try: - self._controller_method(ws) + self._controller_method(ws) # pylint: disable=not-callable finally: ws_manager.delete_connection(ws) @@ -94,19 +94,11 @@ def __call__(self, ws): class _AlreadyHandledResponse(Response): # XXX: Eventlet API should not be used directly. # https://github.com/benoitc/gunicorn/pull/2581 - from packaging import version - import eventlet - if version.parse(eventlet.__version__) >= version.parse("0.30.3"): - import eventlet.wsgi - _ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None) - else: - from eventlet.wsgi import ALREADY_HANDLED - _ALREADY_HANDLED = ALREADY_HANDLED + _ALREADY_HANDLED = getattr(eventlet.wsgi, "ALREADY_HANDLED", None) def __call__(self, environ, start_response): return self._ALREADY_HANDLED - def websocket(name, path): def _websocket(controller_func): def __websocket(self, req, **_): @@ -128,7 +120,7 @@ def __websocket(self, req, **_): return _websocket -class ControllerBase(object): +class ControllerBase: special_vars = ['action', 'controller'] def __init__(self, req, link, data, **config): @@ -175,7 +167,7 @@ class WebSocketRPCServer(RPCServer): def __init__(self, ws, rpc_callback): dispatcher = RPCDispatcher() dispatcher.register_instance(rpc_callback) - super(WebSocketRPCServer, self).__init__( + super().__init__( WebSocketServerTransport(ws), JSONRPCProtocol(), dispatcher, @@ -183,7 +175,7 @@ def __init__(self, ws, rpc_callback): def serve_forever(self): try: - super(WebSocketRPCServer, self).serve_forever() + super().serve_forever() except WebSocketDisconnectedError: return @@ -202,6 +194,7 @@ def send_message(self, message, expect_reply=True): if expect_reply: return self.queue.get() + return None class WebSocketRPCClient(RPCClient): @@ -209,7 +202,7 @@ class 
WebSocketRPCClient(RPCClient): def __init__(self, ws): self.ws = ws self.queue = hub.Queue() - super(WebSocketRPCClient, self).__init__( + super().__init__( JSONRPCProtocol(), WebSocketClientTransport(ws, self.queue), ) @@ -225,10 +218,10 @@ def serve_forever(self): class wsgify_hack(webob.dec.wsgify): def __call__(self, environ, start_response): self.kwargs['start_response'] = start_response - return super(wsgify_hack, self).__call__(environ, start_response) + return super().__call__(environ, start_response) -class WebSocketManager(object): +class WebSocketManager: def __init__(self): self._connections = [] @@ -244,13 +237,13 @@ def broadcast(self, msg): connection.send(msg) -class WSGIApplication(object): +class WSGIApplication: def __init__(self, **config): self.config = config self.mapper = Mapper() self.registory = {} self._wsmanager = WebSocketManager() - super(WSGIApplication, self).__init__() + super().__init__() def _match(self, req): # Note: Invoke the new API, first. If the arguments unmatched, @@ -313,7 +306,7 @@ def websocketmanager(self): class WSGIServer(hub.WSGIServer): def __init__(self, application, host, port, **config): - super(WSGIServer, self).__init__((host, port), application, **config) + super().__init__((host, port), application, **config) def __call__(self): self.serve_forever() diff --git a/tests/codecheck/src_files.sh b/tests/codecheck/src_files.sh index d3e117c32d..36c64f8bfc 100755 --- a/tests/codecheck/src_files.sh +++ b/tests/codecheck/src_files.sh @@ -14,7 +14,7 @@ if [[ "$*" == "" ]] ; then files=("${files[@]}" "${root_files[@]}") - for dir in adapters clib docs faucet tests ; do + for dir in adapters clib ofctl_rest docs faucet tests ; do readarray -t sub_files \ <<< "$(find "${BASEDIR}/${dir}/" -type f ! 
-size 0 -name '*.py' -exec realpath {} \;)" From 5c1ff906866eb4d35461793d7ee2cc54eb654b43 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 22 Feb 2023 01:22:53 +0000 Subject: [PATCH 176/231] install-faucet.sh --- docker/install-faucet.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/install-faucet.sh b/docker/install-faucet.sh index b21a401c2a..89df7354b4 100755 --- a/docker/install-faucet.sh +++ b/docker/install-faucet.sh @@ -38,6 +38,8 @@ apk add libstdc++ rm -r "${HOME}/.cache" rm -r "${FROOT}" rm -r /usr/local/lib/python3*/site-packages/os_ken/tests/ +rm -r /usr/local/lib/python3*/site-packages/os_ken/lib/of_config/ +rm /usr/local/lib/python3*/site-packages/os_ken/cmd/of_config_cli.py # Smoke test faucet -V || exit 1 From 4ae2148632df06220de39b08376639c196104a7a Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 22 Feb 2023 01:24:12 +0000 Subject: [PATCH 177/231] setup.py --- setup.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/setup.py b/setup.py index 31d45858c4..468a53a564 100755 --- a/setup.py +++ b/setup.py @@ -43,19 +43,19 @@ def install_configs(): def setup_ryu_conf(): if not os.path.exists(dst_ryu_conf_dir): - print(f"Creating {dst_ryu_conf_dir}") + print("Creating %s" % dst_ryu_conf_dir) os.makedirs(dst_ryu_conf_dir) if not os.path.isfile(dst_ryu_conf): if os.path.exists(old_ryu_conf) and os.path.isfile(old_ryu_conf): - print(f"Migrating {old_ryu_conf} to {dst_ryu_conf}") + print("Migrating %s to %s" % (old_ryu_conf, dst_ryu_conf)) shutil.copy(old_ryu_conf, dst_ryu_conf) else: - print(f"Copying {src_ryu_conf} to {dst_ryu_conf}") + print("Copying %s to %s" % (src_ryu_conf, dst_ryu_conf)) shutil.copy(src_ryu_conf, dst_ryu_conf) def setup_faucet_conf(): if not os.path.exists(dst_faucet_conf_dir): - print(f"Creating {dst_faucet_conf_dir}") + print("Creating %s" % dst_faucet_conf_dir) os.makedirs(dst_faucet_conf_dir) for file_name in os.listdir(src_faucet_conf_dir): src_file = 
os.path.join(src_faucet_conf_dir, file_name) @@ -64,35 +64,31 @@ def setup_faucet_conf(): if os.path.isfile(dst_file): continue if os.path.isfile(alt_src): - print(f"Migrating {alt_src} to {dst_file}") + print("Migrating %s to %s" % (alt_src, dst_file)) shutil.copy(alt_src, dst_file) elif os.path.isfile(src_file): - print(f"Copying {src_file} to {dst_file}") + print("Copying %s to %s" % (src_file, dst_file)) shutil.copy(src_file, dst_file) def setup_faucet_log(): if not os.path.exists(faucet_log_dir): - print(f"Creating {faucet_log_dir}") + print("Creating %s" % faucet_log_dir) os.makedirs(faucet_log_dir) try: setup_ryu_conf() setup_faucet_conf() - except FileNotFoundError as exception: - print(str(exception)) + setup_faucet_log() except OSError as exception: if exception.errno == errno.EACCES: - print(f"Permission denied creating {exception.filename}, skipping copying configs") + print("Permission denied creating %s, skipping copying configs" + % exception.filename) elif exception.errno == errno.ENOENT: - print(f"File not found creating {exception.filename}, skipping copying configs") + print("File not found creating %s, skipping copying configs" + % exception.filename) else: raise - try: - setup_faucet_log() - except OSError as exception: - print(str(exception)) - setup( name='faucet', From c57ee4b9cd5c9bb0f5a557a45eb0432bb9303d95 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 22 Feb 2023 02:01:28 +0000 Subject: [PATCH 178/231] ofctl path. 
--- clib/mininet_test_topo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clib/mininet_test_topo.py b/clib/mininet_test_topo.py index a2a7ae21c6..a8c218237c 100644 --- a/clib/mininet_test_topo.py +++ b/clib/mininet_test_topo.py @@ -638,7 +638,7 @@ class FAUCET(BaseFAUCET): """Start a FAUCET controller.""" START_ARGS = ['--ryu-app-lists=%s' % (os.path.join(os.path.dirname( - os.path.realpath(__file__)), 'ofctl_rest/ofctl_rest.py'))] + os.path.realpath(__file__)), '/../ofctl_rest/ofctl_rest.py'))] def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env, ctl_privkey, ctl_cert, ca_certs, From 444e2b27ee7c23520ee6eb3071cfe4dd024fa90e Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 22 Feb 2023 02:15:59 +0000 Subject: [PATCH 179/231] pylint. --- .gitignore | 2 ++ ofctl_rest/ofctl_rest.py | 14 +++++++------- setup.py | 2 ++ 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 77e86c8865..4723003cf8 100644 --- a/.gitignore +++ b/.gitignore @@ -113,3 +113,5 @@ ENV/ .idea/ faucet.iml *.bak +AUTHORS +Changelog diff --git a/ofctl_rest/ofctl_rest.py b/ofctl_rest/ofctl_rest.py index 3650c6a7de..cac803f2e0 100644 --- a/ofctl_rest/ofctl_rest.py +++ b/ofctl_rest/ofctl_rest.py @@ -674,37 +674,37 @@ def __init__(self, *args, **kwargs): uri = path + '/flowentry/{cmd}' mapper.connect('stats', uri, controller=StatsController, action='mod_flow_entry', - conditions=dict(method=['POST'])) + conditions={"method": ['POST']}) uri = path + '/flowentry/clear/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='delete_flow_entry', - conditions=dict(method=['DELETE'])) + conditions={"method": ['DELETE']}) uri = path + '/meterentry/{cmd}' mapper.connect('stats', uri, controller=StatsController, action='mod_meter_entry', - conditions=dict(method=['POST'])) + conditions={"method": ['POST']}) uri = path + '/groupentry/{cmd}' mapper.connect('stats', uri, controller=StatsController, 
action='mod_group_entry', - conditions=dict(method=['POST'])) + conditions={"method": ['POST']}) uri = path + '/portdesc/{cmd}' mapper.connect('stats', uri, controller=StatsController, action='mod_port_behavior', - conditions=dict(method=['POST'])) + conditions={"method": ['POST']}) uri = path + '/experimenter/{dpid}' mapper.connect('stats', uri, controller=StatsController, action='send_experimenter', - conditions=dict(method=['POST'])) + conditions={"method": ['POST']}) uri = path + '/role' mapper.connect('stats', uri, controller=StatsController, action='set_role', - conditions=dict(method=['POST'])) + conditions={"method": ['POST']}) self.server = WSGIServer(wsgi, OFCTL_HOST, OFCTL_PORT) self.server_thread = hub.spawn(self.server.serve_forever) diff --git a/setup.py b/setup.py index 468a53a564..be733ff413 100755 --- a/setup.py +++ b/setup.py @@ -2,6 +2,8 @@ """Faucet setup script""" +# pylint: disable=consider-using-f-string + from __future__ import print_function import errno From 9d42fcc61cafe2c0a67165a5cf3d36d45430689d Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 22 Feb 2023 02:26:43 +0000 Subject: [PATCH 180/231] path. 
--- clib/mininet_test_topo.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/clib/mininet_test_topo.py b/clib/mininet_test_topo.py index a8c218237c..96abb6bd21 100644 --- a/clib/mininet_test_topo.py +++ b/clib/mininet_test_topo.py @@ -637,8 +637,9 @@ def stop(self): # pylint: disable=arguments-differ class FAUCET(BaseFAUCET): """Start a FAUCET controller.""" - START_ARGS = ['--ryu-app-lists=%s' % (os.path.join(os.path.dirname( - os.path.realpath(__file__)), '/../ofctl_rest/ofctl_rest.py'))] + START_ARGS = ['--ryu-app-lists=%s' % (os.path.dirname(os.path.realpath(__file__)) + + '/../ofctl_rest/ofctl_rest.py')] + def __init__(self, name, tmpdir, controller_intf, controller_ipv6, env, ctl_privkey, ctl_cert, ca_certs, From 20608bc9d5bd78adc01b195516f5342e13069905 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 22 Feb 2023 02:59:00 +0000 Subject: [PATCH 181/231] workaround. --- docker/runtests.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker/runtests.sh b/docker/runtests.sh index 7ea453cbd1..194b7dad12 100755 --- a/docker/runtests.sh +++ b/docker/runtests.sh @@ -144,6 +144,9 @@ if [ "$HELP" == 1 ] ; then exit 0 fi +# workaround for c65faucet/faucet +export PBR_VERSION=0.0.0 + if [ "$UNIT_TESTS" == 1 ] ; then echo "========== Running faucet unit tests ==========" cd /faucet-src/tests From bbcf3bea82173cea2d004d5b442ad240cf488bf1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Mar 2023 04:04:07 +0000 Subject: [PATCH 182/231] Bump pypa/gh-action-pypi-publish from 1.7.1 to 1.8.3 Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.7.1 to 1.8.3. 
- [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.7.1...v1.8.3) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/release-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index 44f931196c..d2e37d9b22 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -24,7 +24,7 @@ jobs: - name: Build python package run: python3 setup.py sdist - name: Publish python package to PyPI - uses: pypa/gh-action-pypi-publish@v1.7.1 + uses: pypa/gh-action-pypi-publish@v1.8.3 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From 5a937160febe6e789cc51f16190d87f55729d440 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 23 Mar 2023 04:04:26 +0000 Subject: [PATCH 183/231] Bump pylint from 2.17.0 to 2.17.1 Bumps [pylint](https://github.com/PyCQA/pylint) from 2.17.0 to 2.17.1. - [Release notes](https://github.com/PyCQA/pylint/releases) - [Commits](https://github.com/PyCQA/pylint/compare/v2.17.0...v2.17.1) --- updated-dependencies: - dependency-name: pylint dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index 488accf674..79f618c874 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,3 +1,3 @@ flake8==6.0.0 -pylint==2.17.0 +pylint==2.17.1 pytype==2022.10.26 From d4408c43a07afeface72514eb364f844c53ed3fc Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 23 Mar 2023 07:25:50 +0000 Subject: [PATCH 184/231] python3 1.0.16. --- Dockerfile.faucet | 2 +- Dockerfile.gauge | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index cac72a161c..19e344df9b 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:latest +FROM c65sdn/python3:1.0.16 COPY ./ /faucet-src/ diff --git a/Dockerfile.gauge b/Dockerfile.gauge index 04a6553e35..c567d40437 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:latest +FROM c65sdn/faucet:1.0.16 VOLUME ["/etc/faucet/", "/var/log/faucet/"] From 7e11abecff380a970e4d4ec9d69e5cf3f398c702 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 24 Mar 2023 04:04:39 +0000 Subject: [PATCH 185/231] Bump c65sdn/faucet from 1.0.16 to 1.0.46 Bumps c65sdn/faucet from 1.0.16 to 1.0.46. --- updated-dependencies: - dependency-name: c65sdn/faucet dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- Dockerfile.gauge | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.gauge b/Dockerfile.gauge index c567d40437..ca0babc169 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:1.0.16 +FROM c65sdn/faucet:1.0.46 VOLUME ["/etc/faucet/", "/var/log/faucet/"] From 334841b9152fa319352af27f35008b76a0b1546c Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 28 Mar 2023 03:37:53 +0000 Subject: [PATCH 186/231] consider-using-f-string --- .pylintrc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.pylintrc b/.pylintrc index a9ee9e1ac3..d4c231dc5c 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,7 +1,8 @@ [MASTER] disable= fixme, - import-error + import-error, + consider-using-f-string [FORMAT] max-line-length=120 From 68fd153d923dafaaa898dd9cef94e0b457225a90 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 18 Apr 2023 22:30:23 +0000 Subject: [PATCH 187/231] update dnspython --- requirements.txt | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index 3e5cdf9fb8..da77e1a3ff 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,11 @@ -c65chewie==1.0.3 +c65chewie==1.0.4 # git+https://github.com/c65sdn/os-ken@1.0.0 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.16.0 ruamel.yaml==0.17.21 -c65beka==1.0.0 -# dnspython 2.3.0 is not compatible with eventlet -# https://github.com/eventlet/eventlet/issues/781 -dnspython<2.3.0 +c65beka==1.0.1 +dnspython==2.3.0 os_ken==2.6.0 pytricia>=1.0.0 From 1563a22e6298b3679556285503bf7602261358b9 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 18 Apr 2023 22:58:05 +0000 Subject: [PATCH 188/231] changes. 
--- .github/workflows/tests-integration.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-integration.yml b/.github/workflows/tests-integration.yml index 69618e665f..ed100d25b6 100644 --- a/.github/workflows/tests-integration.yml +++ b/.github/workflows/tests-integration.yml @@ -4,7 +4,7 @@ on: [push, pull_request] env: FILES_CHANGED: "all" - MATRIX_SHARDS: 15 + MATRIX_SHARDS: 10 jobs: sanity-tests: From e5bd1efe975a81f9f57969632a862aef190dab45 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 18 Apr 2023 23:50:50 +0000 Subject: [PATCH 189/231] safe. --- .github/workflows/tests-codecheck.yml | 3 ++- .github/workflows/tests-docs.yml | 3 ++- .github/workflows/tests-integration.yml | 6 ++++-- .github/workflows/tests-unit.yml | 3 ++- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests-codecheck.yml b/.github/workflows/tests-codecheck.yml index f6a536d7bd..e9612c7c90 100644 --- a/.github/workflows/tests-codecheck.yml +++ b/.github/workflows/tests-codecheck.yml @@ -14,10 +14,11 @@ jobs: - name: Checkout repo uses: actions/checkout@v3 - if: ${{ github.event_name == 'pull_request' }} - name: Setup dependencies + name: Setup dependencies for changed files action run: | sudo apt-get update -y sudo apt-get install -y jq + git config --global --add safe.directory "$GITHUB_WORKSPACE" - if: ${{ github.event_name == 'pull_request' }} name: Get file changes id: file_changes diff --git a/.github/workflows/tests-docs.yml b/.github/workflows/tests-docs.yml index 621167dbe3..7e69972ee1 100644 --- a/.github/workflows/tests-docs.yml +++ b/.github/workflows/tests-docs.yml @@ -14,10 +14,11 @@ jobs: - name: Checkout repo uses: actions/checkout@v3 - if: ${{ github.event_name == 'pull_request' }} - name: Setup dependencies + name: Setup dependencies for changed files action run: | sudo apt-get update -y sudo apt-get install -y jq + git config --global --add safe.directory "$GITHUB_WORKSPACE" - if: ${{ 
github.event_name == 'pull_request' }} name: Get file changes id: file_changes diff --git a/.github/workflows/tests-integration.yml b/.github/workflows/tests-integration.yml index ed100d25b6..edbb632590 100644 --- a/.github/workflows/tests-integration.yml +++ b/.github/workflows/tests-integration.yml @@ -17,10 +17,11 @@ jobs: - name: Checkout repo uses: actions/checkout@v3 - if: ${{ github.event_name == 'pull_request' }} - name: Setup dependencies + name: Setup dependencies for changed files action run: | sudo apt-get update -y sudo apt-get install -y jq + git config --global --add safe.directory "$GITHUB_WORKSPACE" - if: ${{ github.event_name == 'pull_request' }} name: Get file changes id: file_changes @@ -86,10 +87,11 @@ jobs: - name: Checkout repo uses: actions/checkout@v3 - if: ${{ github.event_name == 'pull_request' && github.event.before != '0000000000000000000000000000000000000000' }} - name: Setup dependencies + name: Setup dependencies for changed files action run: | sudo apt-get update -y sudo apt-get install -y jq + git config --global --add safe.directory "$GITHUB_WORKSPACE" - if: ${{ github.event_name == 'pull_request' && github.event.before != '0000000000000000000000000000000000000000' }} name: Get file changes id: file_changes diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 617fa22e54..e052cbe2da 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -18,10 +18,11 @@ jobs: - name: Checkout repo uses: actions/checkout@v3 - if: ${{ github.event_name == 'pull_request' }} - name: Setup dependencies + name: Setup dependencies for changed files action run: | sudo apt-get update -y sudo apt-get install -y jq + git config --global --add safe.directory "$GITHUB_WORKSPACE" - if: ${{ github.event_name == 'pull_request' }} name: Get file changes id: file_changes From 055b0d5c46bc3298c9a0ab368eef30d5ee5587c7 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 18 Apr 2023 23:52:27 +0000 
Subject: [PATCH 190/231] no dnspython. --- requirements.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index da77e1a3ff..a588bc1593 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,9 @@ c65chewie==1.0.4 -# git+https://github.com/c65sdn/os-ken@1.0.0 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.16.0 ruamel.yaml==0.17.21 c65beka==1.0.1 -dnspython==2.3.0 os_ken==2.6.0 pytricia>=1.0.0 From d142dbd41fa043c51dc82021e9ab7cf458123139 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Apr 2023 04:02:04 +0000 Subject: [PATCH 191/231] Bump c65sdn/python3 from 1.0.16 to 1.0.17 Bumps c65sdn/python3 from 1.0.16 to 1.0.17. --- updated-dependencies: - dependency-name: c65sdn/python3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- Dockerfile.faucet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 19e344df9b..23b063993d 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.16 +FROM c65sdn/python3:1.0.17 COPY ./ /faucet-src/ From ec81b839109870df0c6df2b290dc4cf8668e9814 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 19 Apr 2023 04:02:05 +0000 Subject: [PATCH 192/231] Bump c65sdn/faucet from 1.0.46 to 1.0.47 Bumps c65sdn/faucet from 1.0.46 to 1.0.47. --- updated-dependencies: - dependency-name: c65sdn/faucet dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- Dockerfile.gauge | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.gauge b/Dockerfile.gauge index ca0babc169..d32b4372c0 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:1.0.46 +FROM c65sdn/faucet:1.0.47 VOLUME ["/etc/faucet/", "/var/log/faucet/"] From d10e54fa69bfe48d06a0012e751488f7e661b779 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 19 Apr 2023 11:38:36 +0000 Subject: [PATCH 193/231] c65faucet. --- faucet/prom_client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/faucet/prom_client.py b/faucet/prom_client.py index 3a70940c5d..e4ba07ba2f 100644 --- a/faucet/prom_client.py +++ b/faucet/prom_client.py @@ -53,7 +53,7 @@ class PromClient: # pylint: disable=too-few-public-methods def __init__(self, reg=None): if reg is not None: self._reg = reg - self.version = VersionInfo('faucet').semantic_version().release_string() + self.version = VersionInfo('c65faucet').semantic_version().release_string() self.faucet_version = PromGauge( 'faucet_pbr_version', 'Faucet PBR version', From fc5d60dcb1b89dd5ff962b32abcaf118d8ea55aa Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 19 Apr 2023 22:39:24 +0000 Subject: [PATCH 194/231] latest. --- Dockerfile.gauge | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.gauge b/Dockerfile.gauge index d32b4372c0..04a6553e35 100644 --- a/Dockerfile.gauge +++ b/Dockerfile.gauge @@ -1,6 +1,6 @@ ## Image name: faucet/gauge -FROM c65sdn/faucet:1.0.47 +FROM c65sdn/faucet:latest VOLUME ["/etc/faucet/", "/var/log/faucet/"] From 7084255e312dd151efd9e40e010f2ce21e3978fa Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 20 Apr 2023 00:33:08 +0000 Subject: [PATCH 195/231] timeout. 
--- faucet/fctl.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/faucet/fctl.py b/faucet/fctl.py index c4fcefce0c..c8a0831364 100755 --- a/faucet/fctl.py +++ b/faucet/fctl.py @@ -51,7 +51,7 @@ def scrape_prometheus(endpoints, retries=3, err_output_file=sys.stdout): for _ in range(retries): try: if endpoint.startswith('http'): - response = requests.get(endpoint) + response = requests.get(endpoint, timeout=30) if response.status_code == requests.status_codes.codes.ok: # pylint: disable=no-member content = response.content.decode('utf-8', 'strict') break @@ -113,7 +113,8 @@ def get_samples(endpoints, metric_name, label_matches, nonzero_only=False, metrics, metric_name, label_matches, nonzero_only) -def report_label_match_metrics(report_metrics, metrics, display_labels=None, +def report_label_match_metrics(report_metrics, metrics, # pylint: disable=too-many-arguments + display_labels=None, nonzero_only=False, delim='\t', label_matches=None): """Text report on a list of Prometheus metrics.""" From 985a0dfd73ea08e84a8d98ad0b9c1ad8b664d657 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 2 May 2023 22:58:54 +0000 Subject: [PATCH 196/231] ruamel.yaml et al. 
--- docker-compose.yaml | 2 +- docs/requirements.txt | 2 +- requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index f228686f94..0172731648 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -31,7 +31,7 @@ services: grafana: restart: always - image: 'grafana/grafana:9.4.7' + image: 'grafana/grafana:9.5.1' user: 'root' ports: - '3000:3000' diff --git a/docs/requirements.txt b/docs/requirements.txt index 542850b05b..cb9791dfcc 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt -sphinx==6.2.0 +sphinx==7.0.0 sphinx_rtd_theme==1.2.0 sphinxcontrib-svg2pdfconverter==1.2.2 diff --git a/requirements.txt b/requirements.txt index a588bc1593..9e5d044358 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.16.0 -ruamel.yaml==0.17.21 +ruamel.yaml==0.17.22 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 1b76dec3f2a20cec9748866d7288c48ed0fdce28 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 2 May 2023 23:11:24 +0000 Subject: [PATCH 197/231] black. --- faucet/__main__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/faucet/__main__.py b/faucet/__main__.py index ec845a037a..a239370540 100755 --- a/faucet/__main__.py +++ b/faucet/__main__.py @@ -113,8 +113,8 @@ def parse_args(sys_args): def print_version(): """Print version number and exit.""" - version = VersionInfo('c65faucet').semantic_version().release_string() - message = 'c65faucet %s' % version + version = VersionInfo("c65faucet").semantic_version().release_string() + message = "c65faucet %s" % version print(message) From 85b4ddc06d64cac7b8946e7d119a99f4c2ed6174 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 2 May 2023 23:43:52 +0000 Subject: [PATCH 198/231] requirements. 
--- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index cb9791dfcc..4b6bbb1d5f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt -sphinx==7.0.0 +sphinx==6.2.1 sphinx_rtd_theme==1.2.0 sphinxcontrib-svg2pdfconverter==1.2.2 From 2748a415ae286de70409334e00a12a8e7cee7ff3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 May 2023 04:02:47 +0000 Subject: [PATCH 199/231] Bump pypa/gh-action-pypi-publish from 1.8.5 to 1.8.6 Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.5 to 1.8.6. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/v1.8.5...v1.8.6) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/release-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release-python.yml b/.github/workflows/release-python.yml index cf9befff1c..c709524ba1 100644 --- a/.github/workflows/release-python.yml +++ b/.github/workflows/release-python.yml @@ -24,7 +24,7 @@ jobs: - name: Build python package run: python3 setup.py sdist - name: Publish python package to PyPI - uses: pypa/gh-action-pypi-publish@v1.8.5 + uses: pypa/gh-action-pypi-publish@v1.8.6 with: user: __token__ password: ${{ secrets.PYPI_TOKEN }} From 2170da1a9ba608e1543dbfa98e5664dad248e105 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 5 May 2023 22:05:11 +0000 Subject: [PATCH 200/231] 23 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a588bc1593..61a8d7275e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.16.0 -ruamel.yaml==0.17.21 +ruamel.yaml==0.17.23 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 92d1bb4dae2018c010f7ad14de4d5c3c7a00c206 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 5 May 2023 22:09:46 +0000 Subject: [PATCH 201/231] black. 
--- faucet/__main__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/faucet/__main__.py b/faucet/__main__.py index ec845a037a..a239370540 100755 --- a/faucet/__main__.py +++ b/faucet/__main__.py @@ -113,8 +113,8 @@ def parse_args(sys_args): def print_version(): """Print version number and exit.""" - version = VersionInfo('c65faucet').semantic_version().release_string() - message = 'c65faucet %s' % version + version = VersionInfo("c65faucet").semantic_version().release_string() + message = "c65faucet %s" % version print(message) From 5045a8fd89490bd0dc21ac169f3aa4a08a592b5a Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sun, 7 May 2023 04:28:37 +0000 Subject: [PATCH 202/231] ruamel.yaml '24. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 61a8d7275e..ea66f80ca3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.16.0 -ruamel.yaml==0.17.23 +ruamel.yaml==0.17.24 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 2133a35f7c36eedc897497c7cca88b7f206bcc90 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 10 May 2023 04:27:36 +0000 Subject: [PATCH 203/231] ruamel'26. 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ea66f80ca3..6b29211b36 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.16.0 -ruamel.yaml==0.17.24 +ruamel.yaml==0.17.26 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 609ace98b6b657ac70bc2e6ccc8c23e05ee598ac Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Tue, 16 May 2023 06:43:04 +0000 Subject: [PATCH 204/231] 1.0.18 (alpine 3.18) --- Dockerfile.faucet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 23b063993d..9a5cd42640 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.17 +FROM c65sdn/python3:1.0.18 COPY ./ /faucet-src/ From 614992a9ddd667ba13df390031d7b7853243fd5f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 May 2023 04:01:31 +0000 Subject: [PATCH 205/231] Bump sphinx-rtd-theme from 1.2.0 to 1.2.1 Bumps [sphinx-rtd-theme](https://github.com/readthedocs/sphinx_rtd_theme) from 1.2.0 to 1.2.1. - [Changelog](https://github.com/readthedocs/sphinx_rtd_theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/readthedocs/sphinx_rtd_theme/compare/1.2.0...1.2.1) --- updated-dependencies: - dependency-name: sphinx-rtd-theme dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 4b6bbb1d5f..f340684496 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt sphinx==6.2.1 -sphinx_rtd_theme==1.2.0 +sphinx_rtd_theme==1.2.1 sphinxcontrib-svg2pdfconverter==1.2.2 From 254bc6cb2350cbb961ff256a653c88f6eb43b060 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 04:00:47 +0000 Subject: [PATCH 206/231] Bump prometheus-client from 0.16.0 to 0.17.0 Bumps [prometheus-client](https://github.com/prometheus/client_python) from 0.16.0 to 0.17.0. - [Release notes](https://github.com/prometheus/client_python/releases) - [Commits](https://github.com/prometheus/client_python/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: prometheus-client dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 6b29211b36..5ff27b2034 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ c65chewie==1.0.4 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 -prometheus_client==0.16.0 +prometheus_client==0.17.0 ruamel.yaml==0.17.26 c65beka==1.0.1 os_ken==2.6.0 From 8d92c57696f2575ba174edaa088ec855a7c627d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 08:38:56 +0000 Subject: [PATCH 207/231] Bump sphinx from 6.2.1 to 7.0.1 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 6.2.1 to 7.0.1. 
- [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/master/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v6.2.1...v7.0.1) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index f340684496..448b7c306b 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt -sphinx==6.2.1 +sphinx==7.0.1 sphinx_rtd_theme==1.2.1 sphinxcontrib-svg2pdfconverter==1.2.2 From 54bda0fc1e6eda5a754beebbf8dc585322871447 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 25 May 2023 18:58:19 +0000 Subject: [PATCH 208/231] ruamel yaml '27 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 5ff27b2034..78301291ae 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.17.0 -ruamel.yaml==0.17.26 +ruamel.yaml==0.17.27 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 785eed6794cb2320beafb4549854efdaaca3b4de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 May 2023 04:01:05 +0000 Subject: [PATCH 209/231] Bump tj-actions/changed-files from 35 to 36 Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 35 to 36. 
- [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v35...v36) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-codecheck.yml | 2 +- .github/workflows/tests-docs.yml | 2 +- .github/workflows/tests-integration.yml | 4 ++-- .github/workflows/tests-unit.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tests-codecheck.yml b/.github/workflows/tests-codecheck.yml index 648fe61990..07b5512c53 100644 --- a/.github/workflows/tests-codecheck.yml +++ b/.github/workflows/tests-codecheck.yml @@ -22,7 +22,7 @@ jobs: - if: ${{ github.event_name == 'pull_request' }} name: Get file changes id: file_changes - uses: tj-actions/changed-files@v35 + uses: tj-actions/changed-files@v36 with: json: true json_raw_format: true diff --git a/.github/workflows/tests-docs.yml b/.github/workflows/tests-docs.yml index e66aaef921..69ba8d1cf6 100644 --- a/.github/workflows/tests-docs.yml +++ b/.github/workflows/tests-docs.yml @@ -22,7 +22,7 @@ jobs: - if: ${{ github.event_name == 'pull_request' }} name: Get file changes id: file_changes - uses: tj-actions/changed-files@v35 + uses: tj-actions/changed-files@v36 with: json: true json_raw_format: true diff --git a/.github/workflows/tests-integration.yml b/.github/workflows/tests-integration.yml index 54b97fd017..0213ef9fd7 100644 --- a/.github/workflows/tests-integration.yml +++ b/.github/workflows/tests-integration.yml @@ -25,7 +25,7 @@ jobs: - if: ${{ github.event_name == 'pull_request' }} name: Get file changes id: file_changes - uses: tj-actions/changed-files@v35 + uses: tj-actions/changed-files@v36 with: json: true json_raw_format: true @@ -95,7 +95,7 @@ jobs: - if: ${{ github.event_name 
== 'pull_request' && github.event.before != '0000000000000000000000000000000000000000' }} name: Get file changes id: file_changes - uses: tj-actions/changed-files@v35 + uses: tj-actions/changed-files@v36 with: json: true json_raw_format: true diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 78c02e846a..d5fd663e64 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -26,7 +26,7 @@ jobs: - if: ${{ github.event_name == 'pull_request' }} name: Get file changes id: file_changes - uses: tj-actions/changed-files@v35 + uses: tj-actions/changed-files@v36 with: json: true json_raw_format: true From fc40dcd04567d3ccd8c3ab003d7afe11031a3893 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Fri, 26 May 2023 13:35:56 +0000 Subject: [PATCH 210/231] ruamel '28 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 78301291ae..f99a78832d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.17.0 -ruamel.yaml==0.17.27 +ruamel.yaml==0.17.28 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 01a365de0e787e2f8ff166fc0970b52c42944a6a Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 31 May 2023 06:39:54 +0000 Subject: [PATCH 211/231] ruamel.yaml '31 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f99a78832d..1d66282868 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.17.0 -ruamel.yaml==0.17.28 +ruamel.yaml==0.17.31 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 5f41c5134e635d228afa3d5a505284bddcacc35f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 04:01:57 +0000 Subject: [PATCH 212/231] Bump sphinx-rtd-theme from 1.2.1 to 1.2.2 
Bumps [sphinx-rtd-theme](https://github.com/readthedocs/sphinx_rtd_theme) from 1.2.1 to 1.2.2. - [Changelog](https://github.com/readthedocs/sphinx_rtd_theme/blob/master/docs/changelog.rst) - [Commits](https://github.com/readthedocs/sphinx_rtd_theme/compare/1.2.1...1.2.2) --- updated-dependencies: - dependency-name: sphinx-rtd-theme dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index f340684496..01b10036f6 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt sphinx==6.2.1 -sphinx_rtd_theme==1.2.1 +sphinx_rtd_theme==1.2.2 sphinxcontrib-svg2pdfconverter==1.2.2 From 0d6b4bbb6a67f5b7d49545200de719d27b7287ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 04:07:42 +0000 Subject: [PATCH 213/231] Bump ruamel-yaml from 0.17.31 to 0.17.32 Bumps [ruamel-yaml](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree) from 0.17.31 to 0.17.32. --- updated-dependencies: - dependency-name: ruamel-yaml dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1d66282868..c227e4f27b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.17.0 -ruamel.yaml==0.17.31 +ruamel.yaml==0.17.32 c65beka==1.0.1 os_ken==2.6.0 pytricia>=1.0.0 From 82c3b3b6b9d7ba3ee9c26ae42ce68889849edbf7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jul 2023 03:12:08 +0000 Subject: [PATCH 214/231] Bump black from 23.3.0 to 23.7.0 Bumps [black](https://github.com/psf/black) from 23.3.0 to 23.7.0. - [Release notes](https://github.com/psf/black/releases) - [Changelog](https://github.com/psf/black/blob/main/CHANGES.md) - [Commits](https://github.com/psf/black/compare/23.3.0...23.7.0) --- updated-dependencies: - dependency-name: black dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index ca3e75e36b..e7c4b58889 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ -black==23.3.0 +black==23.7.0 flake8==6.0.0 pylint==2.17.4 pytype==2023.5.8 From 25a18fbb4a9b06d8ab725e87a23b7144066abb97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Jul 2023 03:12:12 +0000 Subject: [PATCH 215/231] Bump prometheus-client from 0.17.0 to 0.17.1 Bumps [prometheus-client](https://github.com/prometheus/client_python) from 0.17.0 to 0.17.1. 
- [Release notes](https://github.com/prometheus/client_python/releases) - [Commits](https://github.com/prometheus/client_python/compare/v0.17.0...v0.17.1) --- updated-dependencies: - dependency-name: prometheus-client dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c227e4f27b..2dff75c2bd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ c65chewie==1.0.4 influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 -prometheus_client==0.17.0 +prometheus_client==0.17.1 ruamel.yaml==0.17.32 c65beka==1.0.1 os_ken==2.6.0 From f9390f8b21b51e1c7c60b8ce2d854c8e1fcda56f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 29 Jul 2023 22:09:52 +0000 Subject: [PATCH 216/231] Bump flake8 from 6.0.0 to 6.1.0 Bumps [flake8](https://github.com/pycqa/flake8) from 6.0.0 to 6.1.0. - [Commits](https://github.com/pycqa/flake8/compare/6.0.0...6.1.0) --- updated-dependencies: - dependency-name: flake8 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index cb50a2a026..44a1fde4ee 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ black==23.7.0 -flake8==6.0.0 +flake8==6.1.0 pylint==2.17.5 pytype==2023.5.8 From ced07bb065abbfbb2e0f7a266254f37f3c7b30c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Aug 2023 03:07:49 +0000 Subject: [PATCH 217/231] Bump os-ken from 2.6.0 to 2.7.0 Bumps [os-ken](http://www.openstack.org/) from 2.6.0 to 2.7.0. 
--- updated-dependencies: - dependency-name: os-ken dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2dff75c2bd..e087561cf1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,5 +5,5 @@ pbr>=1.9 prometheus_client==0.17.1 ruamel.yaml==0.17.32 c65beka==1.0.1 -os_ken==2.6.0 +os_ken==2.7.0 pytricia>=1.0.0 From abdc2820fec2b78ca23ed149d38d827ddfb19c54 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 22:28:00 +0000 Subject: [PATCH 218/231] Bump sphinx from 7.2.4 to 7.2.5 Bumps [sphinx](https://github.com/sphinx-doc/sphinx) from 7.2.4 to 7.2.5. - [Release notes](https://github.com/sphinx-doc/sphinx/releases) - [Changelog](https://github.com/sphinx-doc/sphinx/blob/master/CHANGES) - [Commits](https://github.com/sphinx-doc/sphinx/compare/v7.2.4...v7.2.5) --- updated-dependencies: - dependency-name: sphinx dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- docs/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/requirements.txt b/docs/requirements.txt index 91c4076a53..0125932239 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -r ../requirements.txt -sphinx==7.2.4 +sphinx==7.2.5 sphinx_rtd_theme==1.3.0 sphinxcontrib-svg2pdfconverter==1.2.2 From 78f9c1489963b4655e64c2d7d915ecc51ef9afcb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Sep 2023 03:27:47 +0000 Subject: [PATCH 219/231] Bump codecov/codecov-action from 3 to 4 Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 3 to 4. 
- [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v3...v4) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 7471192945..5bfa8f3cc9 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -68,7 +68,7 @@ jobs: ./tests/run_unit_tests.sh - if: ${{ matrix.python-version == env.CODECOV_PY_VER }} name: Upload codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 - if: ${{ env.FILES_CHANGED == 'all' || env.RQ_FILES_CHANGED || env.PY_FILES_CHANGED }} name: Run pytype run: | From 50cff90156a9e53e5926894dfb6f1760b47cc298 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 23 Sep 2023 10:50:00 +1200 Subject: [PATCH 220/231] 1.0.19 --- Dockerfile.faucet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index 9a5cd42640..ff6c2c85d4 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.18 +FROM c65sdn/python3:1.0.19 COPY ./ /faucet-src/ From 9b7d37473f1abce73a0b63b9ad0959d0cd81dd41 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Sat, 23 Sep 2023 10:58:36 +1200 Subject: [PATCH 221/231] codecov --- .github/workflows/tests-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 5d30b66c3f..299c18ff57 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -68,7 +68,7 @@ jobs: ./tests/run_unit_tests.sh - if: ${{ 
matrix.python-version == env.CODECOV_PY_VER }} name: Upload codecov - uses: codecov/codecov-action@v4 + uses: codecov/codecov-action@v3 - if: ${{ env.FILES_CHANGED == 'all' || env.RQ_FILES_CHANGED || env.PY_FILES_CHANGED }} name: Run pytype run: | From 0ba23c94c9ba9d248f40100c2647a2d60c952b08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 29 Sep 2023 03:37:30 +0000 Subject: [PATCH 222/231] Bump ruamel-yaml from 0.17.32 to 0.17.33 Bumps [ruamel-yaml](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree) from 0.17.32 to 0.17.33. --- updated-dependencies: - dependency-name: ruamel-yaml dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e087561cf1..bfd0d4909d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.17.1 -ruamel.yaml==0.17.32 +ruamel.yaml==0.17.33 c65beka==1.0.1 os_ken==2.7.0 pytricia>=1.0.0 From f36dd1e500180ac473f0fe4f210e2509b9fcb8fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 03:33:24 +0000 Subject: [PATCH 223/231] Bump pylint from 2.17.5 to 3.0.0 Bumps [pylint](https://github.com/pylint-dev/pylint) from 2.17.5 to 3.0.0. - [Release notes](https://github.com/pylint-dev/pylint/releases) - [Commits](https://github.com/pylint-dev/pylint/compare/v2.17.5...v3.0.0) --- updated-dependencies: - dependency-name: pylint dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index 7c7f093fa1..648e6e3774 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ black==23.9.1 flake8==6.1.0 -pylint==2.17.5 +pylint==3.0.0 pytype==2023.9.19 From d25bb22a1c947140ef6a0c7d56a533008574c093 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 05:57:27 +0000 Subject: [PATCH 224/231] Bump pytype from 2023.9.19 to 2023.9.27 Bumps [pytype](https://github.com/google/pytype) from 2023.9.19 to 2023.9.27. - [Changelog](https://github.com/google/pytype/blob/main/CHANGELOG) - [Commits](https://github.com/google/pytype/compare/2023.09.19...2023.09.27) --- updated-dependencies: - dependency-name: pytype dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- codecheck-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/codecheck-requirements.txt b/codecheck-requirements.txt index 81bd185dd8..a4e9ba092f 100644 --- a/codecheck-requirements.txt +++ b/codecheck-requirements.txt @@ -1,4 +1,4 @@ black==23.9.1 flake8==6.1.0 pylint==2.17.7 -pytype==2023.9.19 +pytype==2023.9.27 From 1a30dacc15231fbe33378848a647e7ce1cbd1a60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 03:58:25 +0000 Subject: [PATCH 225/231] Bump ruamel-yaml from 0.17.33 to 0.17.34 Bumps [ruamel-yaml](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree) from 0.17.33 to 0.17.34. --- updated-dependencies: - dependency-name: ruamel-yaml dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bfd0d4909d..6a729b4f29 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.17.1 -ruamel.yaml==0.17.33 +ruamel.yaml==0.17.34 c65beka==1.0.1 os_ken==2.7.0 pytricia>=1.0.0 From 177e24014264336016ae585df8ab03b9c979e0b9 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Wed, 4 Oct 2023 22:39:46 +0000 Subject: [PATCH 226/231] python 3.12. --- .github/workflows/tests-unit.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 299c18ff57..74cce03b1e 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8, 3.9, '3.10', 3.11] + python-version: [3.8, 3.9, '3.10', 3.11, 3.12] steps: - name: Checkout repo uses: actions/checkout@v4 From f09ba6be3f192199fa6660cafa3cd97747226bcc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 03:56:23 +0000 Subject: [PATCH 227/231] Bump ruamel-yaml from 0.17.34 to 0.17.35 Bumps [ruamel-yaml](https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree) from 0.17.34 to 0.17.35. --- updated-dependencies: - dependency-name: ruamel-yaml dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f064690cbf..19ab43ffe3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ influxdb>=2.12.0 networkx>=1.9 pbr>=1.9 prometheus_client==0.17.1 -ruamel.yaml==0.17.34 +ruamel.yaml==0.17.35 c65beka==1.0.1 os_ken==2.7.0 pytricia>=1.0.0 From eacb17dbb181995c94b71c2b74de1be9d5a712fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 5 Oct 2023 03:57:15 +0000 Subject: [PATCH 228/231] Bump c65sdn/python3 from 1.0.19 to 1.0.20 Bumps c65sdn/python3 from 1.0.19 to 1.0.20. --- updated-dependencies: - dependency-name: c65sdn/python3 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- Dockerfile.faucet | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.faucet b/Dockerfile.faucet index ff6c2c85d4..cacbaa88e0 100644 --- a/Dockerfile.faucet +++ b/Dockerfile.faucet @@ -1,6 +1,6 @@ ## Image name: faucet/faucet -FROM c65sdn/python3:1.0.19 +FROM c65sdn/python3:1.0.20 COPY ./ /faucet-src/ From 5a7419a25902dd7e69ed85e590be0f62b1ef86c9 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 5 Oct 2023 07:24:13 +0000 Subject: [PATCH 229/231] fixed eventlet. --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index f064690cbf..f0c225a544 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,5 @@ ruamel.yaml==0.17.34 c65beka==1.0.1 os_ken==2.7.0 pytricia>=1.0.0 +eventlet@git+https://github.com/hroncok/eventlet@python3.12 https://github.com/faucetsdn/python3-fakencclient/archive/main.tar.gz From 2746d5b2b5b1efec467cf0f61f0ee2f8cf48b943 Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Thu, 5 Oct 2023 07:34:12 +0000 Subject: [PATCH 230/231] don't need packaging. 
--- tests/unit/packaging/test_packaging.py | 132 ------------------------- 1 file changed, 132 deletions(-) delete mode 100755 tests/unit/packaging/test_packaging.py diff --git a/tests/unit/packaging/test_packaging.py b/tests/unit/packaging/test_packaging.py deleted file mode 100755 index 1058790e0c..0000000000 --- a/tests/unit/packaging/test_packaging.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python3 - -"""Test FAUCET packaging""" - -# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. -# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd. -# Copyright (C) 2015--2019 The Contributors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import unittest - -from deb_pkg_tools.control import parse_control_fields -from deb_pkg_tools.deb822 import parse_deb822 -from deb_pkg_tools.deps import VersionedRelationship -import requirements - - -class CheckDebianPackageTestCase(unittest.TestCase): # pytype: disable=module-attr - """Test debian packaging.""" - - def _parse_deb_control(self, control_file): - with open(control_file, "r", encoding="utf-8") as handle: - control = handle.read() - - faucet_dpkg = str() - for line in control.split("\n"): - if line.startswith("Package: python3-faucet"): - faucet_dpkg += line - elif faucet_dpkg: - if not line: - break - faucet_dpkg += "\n{}".format(line) - - faucet_dpkg = parse_control_fields(parse_deb822(faucet_dpkg)) - self.faucet_dpkg_deps = {} - for dep in faucet_dpkg["Depends"]: - if isinstance(dep, VersionedRelationship): - if dep.name not in self.faucet_dpkg_deps: - self.faucet_dpkg_deps[dep.name] = [] - self.faucet_dpkg_deps[dep.name].append( - "{}{}".format(dep.operator, dep.version) - ) - - def _parse_pip_requirements(self, requirements_file): - self.faucet_pip_reqs = {} - with open(requirements_file, "r", encoding="utf-8") as handle: - for pip_req in requirements.parse(handle): - if pip_req.name is None: - continue - self.faucet_pip_reqs[pip_req.name] = pip_req.specs - - def _pip_req_to_dpkg_name(self, pip_req): - if pip_req in self.dpkg_name: - return self.dpkg_name[pip_req] - return "python3-" + pip_req - - def setUp(self): - src_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../") - control_file = os.path.join(src_dir, "debian/control") - requirements_file = os.path.join(src_dir, "requirements.txt") - - self.dpkg_name = { - "os_ken": "python3-os-ken", - "prometheus_client": "python3-prometheus-client", - } - - self._parse_deb_control(control_file) - self._parse_pip_requirements(requirements_file) - - def disabled_test_pip_reqs_in_deb_package(self): - """Test pip requirements are listed as dependencies on debian 
package.""" - - for pip_req in self.faucet_pip_reqs: - dpkg_name = self._pip_req_to_dpkg_name(pip_req) - self.assertIn(dpkg_name, self.faucet_dpkg_deps) - - def disabled_test_pip_reqs_versions_match_deb_package(self): - """Test pip requirements versions match debian package dependencies.""" - - for pip_req, pip_req_versions in self.faucet_pip_reqs.items(): - dpkg_name = self._pip_req_to_dpkg_name(pip_req) - - if pip_req_versions: - debian_package_dependencies = [ - dpkg_name + x for x in self.faucet_dpkg_deps[dpkg_name] - ] - for pip_req_specifier, pip_req_version in pip_req_versions: - if pip_req_specifier == "==": - # debian/control is annoying about how it handles exact - # versions, calculate the debian equivalent of the - # pip requirements match and compare that - lower_version = pip_req_version - lower_match = ">=" + lower_version - - upper_version = pip_req_version.split(".") - upper_version[-1] = str(int(upper_version[-1]) + 1) - upper_version = ".".join(upper_version) - upper_match = "<<" + upper_version - - self.assertIn( - dpkg_name + lower_match, debian_package_dependencies - ) - self.assertIn( - dpkg_name + upper_match, debian_package_dependencies - ) - elif pip_req_specifier == "<": - # debian/control uses << instead of < - match = dpkg_name + "<<" + pip_req_version - self.assertIn(match, debian_package_dependencies) - elif pip_req_specifier == ">": - # debian/control uses >> instead of > - match = dpkg_name + ">>" + pip_req_version - self.assertIn(match, debian_package_dependencies) - else: - match = dpkg_name + pip_req_specifier + pip_req_version - self.assertIn(match, debian_package_dependencies) - - -if __name__ == "__main__": - unittest.main() # pytype: disable=module-attr From 239ed81725c82ea74d7eeb7a2a1345892e0e2a9f Mon Sep 17 00:00:00 2001 From: Josh Bailey Date: Mon, 23 Oct 2023 19:15:21 +1300 Subject: [PATCH 231/231] 3.8 --- .github/workflows/tests-unit.yml | 4 +- setup.py | 8 +- tests/unit/packaging/test_packaging.py | 132 
------------------------- 3 files changed, 6 insertions(+), 138 deletions(-) delete mode 100755 tests/unit/packaging/test_packaging.py diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index ac3ef2b122..126a4cae12 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -5,7 +5,7 @@ on: [push, pull_request] env: FILES_CHANGED: "all" CODECOV_PY_VER: '3.10' - USING_PYTYPE: '3.8,3.9,3.10,3.11' + USING_PYTYPE: '3.9,3.10,3.11' jobs: unit-tests: @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8, 3.9, '3.10', 3.11, 3.12] + python-version: [3.9, '3.10', 3.11, 3.12] steps: - name: Checkout repo uses: actions/checkout@v4 diff --git a/setup.py b/setup.py index 68035cab7a..127bc14f8a 100755 --- a/setup.py +++ b/setup.py @@ -18,19 +18,19 @@ print( """You are trying to install faucet on python {py} -Faucet is not compatible with python 2, please upgrade to python 3.8 or newer.""".format( +Faucet is not compatible with python 2, please upgrade to python 3.9 or newer.""".format( py=".".join([str(v) for v in sys.version_info[:3]]) ), file=sys.stderr, ) sys.exit(1) -elif sys.version_info < (3, 8): +elif sys.version_info < (3, 9): print( """You are trying to install faucet on python {py} Faucet 1.9.0 and above are no longer compatible with older versions of python 3. 
-Please upgrade to python 3.7 or newer.""".format( +Please upgrade to python 3.9 or newer.""".format( py=".".join([str(v) for v in sys.version_info[:3]]) ) ) @@ -106,7 +106,7 @@ def setup_faucet_log(): setup( name="faucet", setup_requires=["pbr>=1.9", "setuptools>=17.1"], - python_requires=">=3.8", + python_requires=">=3.9", pbr=True, ) diff --git a/tests/unit/packaging/test_packaging.py b/tests/unit/packaging/test_packaging.py deleted file mode 100755 index 15ceb663e5..0000000000 --- a/tests/unit/packaging/test_packaging.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python3 - -"""Test FAUCET packaging""" - -# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer. -# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd. -# Copyright (C) 2015--2019 The Contributors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -import unittest - -from deb_pkg_tools.control import parse_control_fields -from deb_pkg_tools.deb822 import parse_deb822 -from deb_pkg_tools.deps import VersionedRelationship -import requirements - - -class CheckDebianPackageTestCase(unittest.TestCase): # pytype: disable=module-attr - """Test debian packaging.""" - - def _parse_deb_control(self, control_file): - with open(control_file, "r", encoding="utf-8") as handle: - control = handle.read() - - faucet_dpkg = str() - for line in control.split("\n"): - if line.startswith("Package: python3-faucet"): - faucet_dpkg += line - elif faucet_dpkg: - if not line: - break - faucet_dpkg += "\n{}".format(line) - - faucet_dpkg = parse_control_fields(parse_deb822(faucet_dpkg)) - self.faucet_dpkg_deps = {} - for dep in faucet_dpkg["Depends"]: - if isinstance(dep, VersionedRelationship): - if dep.name not in self.faucet_dpkg_deps: - self.faucet_dpkg_deps[dep.name] = [] - self.faucet_dpkg_deps[dep.name].append( - "{}{}".format(dep.operator, dep.version) - ) - - def _parse_pip_requirements(self, requirements_file): - self.faucet_pip_reqs = {} - with open(requirements_file, "r", encoding="utf-8") as handle: - for pip_req in requirements.parse(handle): - if pip_req.name is None or pip_req.local_file: - continue - self.faucet_pip_reqs[pip_req.name] = pip_req.specs - - def _pip_req_to_dpkg_name(self, pip_req): - if pip_req in self.dpkg_name: - return self.dpkg_name[pip_req] - return "python3-" + pip_req - - def setUp(self): - src_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../") - control_file = os.path.join(src_dir, "debian/control") - requirements_file = os.path.join(src_dir, "requirements.txt") - - self.dpkg_name = { - "os_ken": "python3-os-ken", - "prometheus_client": "python3-prometheus-client", - } - - self._parse_deb_control(control_file) - self._parse_pip_requirements(requirements_file) - - def test_pip_reqs_in_deb_package(self): - """Test pip requirements are listed as dependencies on 
debian package.""" - - for pip_req in self.faucet_pip_reqs: - dpkg_name = self._pip_req_to_dpkg_name(pip_req) - self.assertIn(dpkg_name, self.faucet_dpkg_deps) - - def test_pip_reqs_versions_match_deb_package(self): - """Test pip requirements versions match debian package dependencies.""" - - for pip_req, pip_req_versions in self.faucet_pip_reqs.items(): - dpkg_name = self._pip_req_to_dpkg_name(pip_req) - - if pip_req_versions: - debian_package_dependencies = [ - dpkg_name + x for x in self.faucet_dpkg_deps[dpkg_name] - ] - for pip_req_specifier, pip_req_version in pip_req_versions: - if pip_req_specifier == "==": - # debian/control is annoying about how it handles exact - # versions, calculate the debian equivalent of the - # pip requirements match and compare that - lower_version = pip_req_version - lower_match = ">=" + lower_version - - upper_version = pip_req_version.split(".") - upper_version[-1] = str(int(upper_version[-1]) + 1) - upper_version = ".".join(upper_version) - upper_match = "<<" + upper_version - - self.assertIn( - dpkg_name + lower_match, debian_package_dependencies - ) - self.assertIn( - dpkg_name + upper_match, debian_package_dependencies - ) - elif pip_req_specifier == "<": - # debian/control uses << instead of < - match = dpkg_name + "<<" + pip_req_version - self.assertIn(match, debian_package_dependencies) - elif pip_req_specifier == ">": - # debian/control uses >> instead of > - match = dpkg_name + ">>" + pip_req_version - self.assertIn(match, debian_package_dependencies) - else: - match = dpkg_name + pip_req_specifier + pip_req_version - self.assertIn(match, debian_package_dependencies) - - -if __name__ == "__main__": - unittest.main() # pytype: disable=module-attr