From 3bf1abb2dcaa03f02a245609127fc316829a89a2 Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Mon, 29 Aug 2022 08:19:28 -0700 Subject: [PATCH 01/52] Address Review Comment to define SONIC_GLOBAL_DB_CLI in gbsyncd.sh (#11857) As part of PR #11754, a change was added to use the variable SONIC_DB_NS_CLI for namespaces, but that will not work since ./files/scripts/syncd_common.sh uses SONIC_DB_CLI. So revert back to using SONIC_DB_CLI and define a new variable, SONIC_GLOBAL_DB_CLI, for global/host DB CLI access. Also fixed DB_CLI not working for namespaces. --- files/scripts/gbsyncd.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/files/scripts/gbsyncd.sh b/files/scripts/gbsyncd.sh index a7eaaced0cd6..0948aaadc199 100755 --- a/files/scripts/gbsyncd.sh +++ b/files/scripts/gbsyncd.sh @@ -4,8 +4,8 @@ function startplatform() { - declare -a DbCliArray=($SONIC_DB_CLI $SONIC_DB_NS_CLI) - for DB_CLI in ${DbCliArray[@]}; do + declare -A DbCliArray=([0]=$SONIC_GLOBAL_DB_CLI [1]=$SONIC_DB_CLI) + for DB_CLI in "${DbCliArray[@]}"; do # Add gbsyncd to FEATURE table, if not in. It did have same config as syncd. if [ -z $($DB_CLI CONFIG_DB HGET 'FEATURE|gbsyncd' state) ]; then local CMD="local r=redis.call('DUMP', KEYS[1]); redis.call('RESTORE', KEYS[2], 0, r)" @@ -34,11 +34,11 @@ PEER="swss" DEBUGLOG="/tmp/swss-$SERVICE-debug$DEV.log" LOCKFILE="/tmp/swss-$SERVICE-lock$DEV" NAMESPACE_PREFIX="asic" +SONIC_GLOBAL_DB_CLI="sonic-db-cli" SONIC_DB_CLI="sonic-db-cli" -SONIC_DB_NS_CLI="sonic-db-cli" if [ "$DEV" ]; then NET_NS="$NAMESPACE_PREFIX$DEV" #name of the network namespace - SONIC_DB_NS_CLI="sonic-db-cli -n $NET_NS" + SONIC_DB_CLI="sonic-db-cli -n $NET_NS" fi case "$1" in From a1d3d994576cfb467c19e2a7394277b6d92a4e71 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Tue, 30 Aug 2022 02:15:42 +0800 Subject: [PATCH 02/52] [Build] Increase the size of the installer image (#11869) #### Why I did it Fix the build failure caused by the installer image size being too small. The installer image is only used during the build; it does not impact the final images. See https://dev.azure.com/mssonic/build/_build/results?buildId=139926&view=logs&j=cef3d8a9-152e-5193-620b-567dc18af272&t=359769c4-8b5e-5976-a793-85da132e0a6f ``` + fallocate -l 2048M ./sonic-installer.img + mkfs.vfat ./sonic-installer.img mkfs.fat 4.2 (2021-01-31) ++ mktemp -d + tmpdir=/tmp/tmp.TqdDSc00Cn + mount -o loop ./sonic-installer.img /tmp/tmp.TqdDSc00Cn + cp target/sonic-vs.bin /tmp/tmp.TqdDSc00Cn/onie-installer.bin cp: error writing '/tmp/tmp.TqdDSc00Cn/onie-installer.bin': No space left on device [ FAIL LOG END ] [ target/sonic-vs.img.gz ] ``` #### How I did it Increase the size from 2048M to 4096M. Why not increase it to 16G like the qcow2 image? The qcow2 format supports sparse disks: although a large disk size is allocated, it does not consume the real disk space. fallocate does not support sparse allocation, so a very large disk that is never used would only require more space for the build.
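As a side note, the sparse-vs-preallocated distinction above is easy to see from a shell. A minimal sketch (file names are illustrative; `truncate` creates a sparse file, roughly how a qcow2 image stays small, while `fallocate -l` reserves real blocks up front):

```
# Sparse: apparent size 16G, but almost no blocks are actually allocated
truncate -s 16G sparse.img
# Preallocated: the full 4G of blocks is reserved immediately
fallocate -l 4096M prealloc.img

ls -lh sparse.img prealloc.img   # both report their apparent sizes
du -h  sparse.img prealloc.img   # sparse.img ~0, prealloc.img ~4.0G
```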
--- scripts/build_kvm_image.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build_kvm_image.sh b/scripts/build_kvm_image.sh index 6e5fd7dec742..44009ed013f4 100755 --- a/scripts/build_kvm_image.sh +++ b/scripts/build_kvm_image.sh @@ -36,7 +36,7 @@ create_disk() prepare_installer_disk() { - fallocate -l 2048M $INSTALLER_DISK + fallocate -l 4096M $INSTALLER_DISK mkfs.vfat $INSTALLER_DISK From 186568a21d1998868ca0a8d8e888ef35a5d17bf8 Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Mon, 29 Aug 2022 11:34:23 -0700 Subject: [PATCH 03/52] Update sensor names for msn4600c for the 5.10 kernel (#11491) * Update sensor names for msn4600c for the 5.10 kernel Looks like a sensor was removed in the 5.10 kernel for the tps53679 sensor, so the names/indexing has changed. Related to Azure/sonic-mgmt#4513. Signed-off-by: Saikrishna Arcot * Update sensors file Signed-off-by: Saikrishna Arcot Signed-off-by: Saikrishna Arcot --- .../x86_64-mlnx_msn4600c-r0/sensors.conf | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/device/mellanox/x86_64-mlnx_msn4600c-r0/sensors.conf b/device/mellanox/x86_64-mlnx_msn4600c-r0/sensors.conf index 3ff78f15023f..b0ad1ff407b4 100644 --- a/device/mellanox/x86_64-mlnx_msn4600c-r0/sensors.conf +++ b/device/mellanox/x86_64-mlnx_msn4600c-r0/sensors.conf @@ -129,27 +129,29 @@ bus "i2c-5" "i2c-1-mux (chan_id 4)" bus "i2c-15" "i2c-1-mux (chan_id 6)" chip "tps53679-i2c-*-58" - label in1 "PMIC-8 PSU 12V Rail (in1)" - label in2 "PMIC-8 PSU 12V Rail (in2)" - label in3 "PMIC-8 COMEX 1.8V Rail (out)" - label in4 "PMIC-8 COMEX 1.05V Rail (out)" + label in1 "PMIC-8 PSU 12V Rail (in)" + label in2 "PMIC-8 COMEX 1.8V Rail (out)" + label in3 "PMIC-8 COMEX 1.05V Rail (out)" label temp1 "PMIC-8 Temp 1" label temp2 "PMIC-8 Temp 2" - label power1 "PMIC-8 COMEX 1.8V Rail Pwr (out)" - label power2 "PMIC-8 COMEX 1.05V Rail Pwr (out)" - label curr1 "PMIC-8 COMEX 1.8V Rail Curr (out)" - label curr2 "PMIC-8 COMEX 1.05V Rail Curr (out)" + ignore power1 + label power2 "PMIC-8 COMEX 1.8V Rail Pwr (out)" + label power3 "PMIC-8 COMEX 1.05V Rail Pwr (out)" + ignore curr1 + label curr2 "PMIC-8 COMEX 1.8V Rail Curr (out)" + label curr3 "PMIC-8 COMEX 1.05V Rail Curr (out)" chip "tps53679-i2c-*-61" - label in1 "PMIC-9 PSU 12V Rail (in1)" - label in2 "PMIC-9 PSU 12V Rail (in2)" - label in3 "PMIC-9 COMEX 1.2V Rail (out)" - ignore in4 + label in1 "PMIC-9 PSU 12V Rail (in)" + label in2 "PMIC-9 COMEX 1.2V Rail (out)" + ignore in3 label temp1 "PMIC-9 Temp 1" label temp2 "PMIC-9 Temp 2" - label power1 "PMIC-9 COMEX 1.2V Rail Pwr (out)" - ignore power2 - label curr1 "PMIC-9 COMEX 1.2V Rail Curr (out)" - ignore curr2 + ignore power1 + label power2 "PMIC-9 COMEX 1.2V Rail Pwr (out)" + ignore power3 + ignore curr1 + label curr2 "PMIC-9 COMEX 1.2V Rail Curr (out)" + ignore curr3 # Power supplies bus "i2c-4" "i2c-1-mux (chan_id 3)" From de54eece467452b0909cdf13a8a085b8303812ca Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Mon, 29 Aug 2022 11:35:07 -0700 Subject: [PATCH 04/52] Fix error handling when failing to install a deb package (#11846) The current error handling code for when a deb package fails to be installed currently has a chain of commands linked together by && and ends with `exit 1`. The assumption is that the commands would succeed, and the last `exit 1` would end it with a non-zero return code, thus fully failing the target and causing the build to stop because of bash's -e flag. 
However, if one of the commands prior to `exit 1` returns a non-zero return code, then bash won't actually treat it as a terminating error. From bash's man page: ``` -e Exit immediately if a pipeline (which may consist of a single simple command), a list, or a compound command (see SHELL GRAMMAR above), exits with a non-zero status. The shell does not exit if the command that fails is part of the command list immediately following a while or until keyword, part of the test following the if or elif reserved words, part of any command executed in a && or || list except the command following the final && or ||, any command in a pipeline but the last, or if the command's return value is being inverted with !. If a compound command other than a subshell returns a non-zero status because a command failed while -e was being ignored, the shell does not exit. ``` The part `part of any command executed in a && or || list except the command following the final && or ||` says that if the failing command is not the `exit 1` that we have at the end, then bash doesn't treat it as an error and exit immediately. Additionally, since this is a compound command but isn't in a subshell (subshells are marked by `(` and `)`, whereas `{` and `}` just tell bash to run the commands in the current environment), bash doesn't exit. The result of this is that in the deb-install target, if a package installation fails, the build may be stuck in that while-loop indefinitely. There are two fixes for this: change to using a subshell, or use `;` instead of `&&`. Using a subshell would, I think, require exporting any shell variables used in the subshell, so I chose to change the `&&` to `;`. In addition, at the start of the braced command list, `set +e` is added in, which removes the exit-on-error handling of bash. This makes sure that all commands are run (the output of which may help for debugging) and that it still exits with 1, which will then fully fail the target.
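A minimal standalone sketch of the behavior described above (illustrative only, not the actual slave.mk recipe):

```
#!/bin/bash -e

# Before: the failure happens inside a { ...; } && list, so -e is
# suppressed for it, the && short-circuits past `exit 1`, and per the
# compound-command rule quoted above the shell does not exit either.
{ false && echo unreachable && exit 1; } || { false && exit 1; }
echo "still running despite the failure"

# After: with `;` separators and `set +e`, every diagnostic command runs
# and the final `exit 1` is always reached, failing the script for real.
{ false; } || { set +e; false; echo "collecting diagnostics"; exit 1; }
echo "never reached"
```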
Signed-off-by: Saikrishna Arcot Signed-off-by: Saikrishna Arcot --- slave.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/slave.mk b/slave.mk index e1f4a0ef80d9..7cdee954ad73 100644 --- a/slave.mk +++ b/slave.mk @@ -736,13 +736,13 @@ $(SONIC_INSTALL_DEBS) : $(DEBS_PATH)/%-install : .platform $$(addsuffix -install # put a lock here because dpkg does not allow installing packages in parallel if mkdir $(DEBS_PATH)/dpkg_lock &> /dev/null; then ifneq ($(CROSS_BUILD_ENVIRON),y) - { sudo DEBIAN_FRONTEND=noninteractive dpkg -i $(DEBS_PATH)/$* $(LOG) && rm -d $(DEBS_PATH)/dpkg_lock && break; } || { rm -d $(DEBS_PATH)/dpkg_lock && sudo lsof /var/lib/dpkg/lock-frontend && ps aux && exit 1 ; } + { sudo DEBIAN_FRONTEND=noninteractive dpkg -i $(DEBS_PATH)/$* $(LOG) && rm -d $(DEBS_PATH)/dpkg_lock && break; } || { set +e; rm -d $(DEBS_PATH)/dpkg_lock; sudo lsof /var/lib/dpkg/lock-frontend; ps aux; exit 1 ; } else # Relocate debian packages python libraries to the cross python virtual env location { sudo DEBIAN_FRONTEND=noninteractive dpkg -i $(if $(findstring $(LINUX_HEADERS),$*),--force-depends) $(DEBS_PATH)/$* $(LOG) && \ rm -rf tmp && mkdir tmp && dpkg -x $(DEBS_PATH)/$* tmp && (sudo cp -rf tmp/usr/lib/python2*/dist-packages/* $(VIRTENV_LIB_CROSS_PYTHON2)/python2*/site-packages/ 2>/dev/null || true) && \ (sudo cp -rf tmp/usr/lib/python3/dist-packages/* $(VIRTENV_LIB_CROSS_PYTHON3)/python3.*/site-packages/ 2>/dev/null || true) && \ rm -d $(DEBS_PATH)/dpkg_lock && break; } || { set +e; rm -d $(DEBS_PATH)/dpkg_lock; sudo lsof /var/lib/dpkg/lock-frontend; ps aux; exit 1 ; } endif fi sleep 10 From 4733053c53dde1c07dbfe72aeb4232e41bef54a4 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Tue, 30 Aug 2022 09:19:58 +0800 Subject: [PATCH 05/52] Fix vs check install login timeout issue (#11727) Why I did it Fix an unstable build issue: #11620 The vs VM started successfully, but the build failed while waiting for the message "sonic login:". 55 builds failed because of this issue in the last 30 days, per this query: AzurePipelineBuildLogs | where startTime > ago(30d) | where type =~ "task" | where result =~ "failed" | where name =~ "Build sonic image" | where content contains "Timeout exceeded" | where content contains "re.compile('sonic login:')" | project-away content | extend branchName=case(reason=~"pullRequest", tostring(todynamic(parameters)['system.pullRequest.targetBranch']), replace("refs/heads/", "", sourceBranch)) | summarize FailedCount=dcount(buildId) by branchName
branchName  FailedCount
master      37
202012      9
202106      4
202111      2
202205      1
201911      1
It is caused by the login prompt getting mixed with the output of /etc/rc.local; one example is below (see the line rc.local[307]: sonic+ onie_disco_subnet=255.255.255.0 login: ). check_install.py was waiting for the message "sonic login:", while the Linux console was waiting for the username input (the login prompt had already been printed on the console).
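As a rough illustration of the interleaving (synthetic commands, not taken from the build; the real log follows below):

```
#!/bin/bash
# Writer 1 stands in for getty printing the login prompt in pieces;
# writer 2 stands in for rc.local logging during boot on the same tty.
{ printf 'sonic '; sleep 0.2; printf 'login: \n'; } &
{ sleep 0.1; echo 'rc.local[307]: + onie_disco_subnet=255.255.255.0'; } &
wait
# Typical output:
#   sonic rc.local[307]: + onie_disco_subnet=255.255.255.0
#   login:
# A watcher waiting for the contiguous string "sonic login:" never matches,
# but sending a newline makes getty print a fresh, unbroken prompt.
```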
https://dev.azure.com/mssonic/build/_build/results?buildId=123294&view=logs&j=cef3d8a9-152e-5193-620b-567dc18af272&t=359769c4-8b5e-5976-a793-85da132e0a6f 2022-07-17T15:00:58.9198877Z [ 25.493855] rc.local[307]: + onie_disco_opt53=05 2022-07-17T15:00:58.9199330Z [ 25.595054] rc.local[307]: + onie_disco_router=10.0.2.2 2022-07-17T15:00:58.9199781Z [ 25.699409] rc.local[307]: + onie_disco_serverid=10.0.2.2 2022-07-17T15:00:58.9200252Z [ 25.789891] rc.local[307]: + onie_disco_siaddr=10.0.2.2 2022-07-17T15:00:58.9200622Z [ 25.880920] 2022-07-17T15:00:58.9200745Z 2022-07-17T15:00:58.9201019Z Debian GNU/Linux 10 sonic ttyS0 2022-07-17T15:00:58.9201201Z 2022-07-17T15:00:58.9201542Z rc.local[307]: sonic+ onie_disco_subnet=255.255.255.0 login: 2022-07-17T15:00:58.9202309Z [ 26.079767] rc.local[307]: + onie_exec_url=file://dev/vdb/onie-installer.bin How I did it Send a newline once the script /etc/rc.local has finished running. After a newline is entered, the "sonic login:" prompt is printed again. --- check_install.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/check_install.py b/check_install.py index ecd3a8ee9a3b..b8fc3936f751 100755 --- a/check_install.py +++ b/check_install.py @@ -19,6 +19,7 @@ def main(): passwd_prompt = 'Password:' cmd_prompt = "{}@sonic:~\$ $".format(args.u) grub_selection = "The highlighted entry will be executed" + firsttime_prompt = 'firsttime_exit' i = 0 while True: @@ -38,13 +39,17 @@ def main(): # bootup sonic image while True: - i = p.expect([login_prompt, passwd_prompt, cmd_prompt]) + i = p.expect([login_prompt, passwd_prompt, firsttime_prompt, cmd_prompt]) if i == 0: # send user name p.sendline(args.u) elif i == 1: # send password p.sendline(args.P) + elif i == 2: + # fix a login timeout issue, caused by the login_prompt message mixed with the output message of the rc.local + time.sleep(1) + p.sendline() else: break From cf69206d020a4b905bdd2f408c974ed4fe8cc9a2 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Tue, 30 Aug 2022 14:23:09 +0800 Subject: [PATCH 06/52] [ci] Fix bug involved by PR 11810 which affect official build pipeline (#11891) Why I did it Fix the issue where the official build was not triggered correctly, caused by the azp template path not existing. How I did it Change the azp template path. --- .azure-pipelines/official-build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.azure-pipelines/official-build.yml b/.azure-pipelines/official-build.yml index c73124f2e91f..3ebb106a621b 100644 --- a/.azure-pipelines/official-build.yml +++ b/.azure-pipelines/official-build.yml @@ -38,7 +38,7 @@ stages: variables: - name: CACHE_MODE value: wcache - - template: azure-pipelines-repd-build-variables.yml@buildimage + - template: .azure-pipelines/azure-pipelines-repd-build-variables.yml@buildimage jobs: - template: azure-pipelines-build.yml parameters: From 092e0394b5bb0e74daa8b984c4ee6da5fdaac625 Mon Sep 17 00:00:00 2001 From: Arun Saravanan Balachandran <52521751+ArunSaravananBalachandran@users.noreply.github.com> Date: Tue, 30 Aug 2022 23:53:52 +0530 Subject: [PATCH 07/52] DellEMC: Z9332f - Graceful platform reboot (#10240) Why I did it To gracefully unmount filesystems and stop containers while performing a cold reboot. Unmount ONIE-BOOT if it is mounted during fast/soft/warm reboot. How I did it Override the systemd-reboot service to perform a cold reboot. Unmount ONIE-BOOT if mounted, using fast/soft/warm-reboot plugins.
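For background, the override relies on standard systemd drop-in semantics: the override.conf added below clears the stock ExecStart with an empty assignment and then substitutes the platform script. One way to confirm the drop-in took effect on a device might be (paths assume this package's layout):

```
# Show the unit together with any drop-ins systemd merged into it
systemctl cat systemd-reboot.service
# The drop-in installed by this patch should be listed here
ls /etc/systemd/system/systemd-reboot.service.d/
```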
How to verify it On reboot, verify that the container stop and filesystem unmount services have completed execution before the platform reboot. --- .../debian/platform-modules-z9332f.install | 6 ++++- .../z9332f/scripts/fast-reboot_plugin | 8 +++++++ .../z9332f/scripts/override.conf | 3 +++ .../z9332f/scripts/platform_reboot_override | 23 +++++++++++++++++++ .../z9332f/scripts/soft-reboot_plugin | 1 + .../z9332f/scripts/warm-reboot_plugin | 1 + 6 files changed, 41 insertions(+), 1 deletion(-) create mode 100755 platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/fast-reboot_plugin create mode 100644 platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/override.conf create mode 100755 platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/platform_reboot_override create mode 120000 platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/soft-reboot_plugin create mode 120000 platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/warm-reboot_plugin diff --git a/platform/broadcom/sonic-platform-modules-dell/debian/platform-modules-z9332f.install b/platform/broadcom/sonic-platform-modules-dell/debian/platform-modules-z9332f.install index ad25015472da..ecbd88a6ffef 100644 --- a/platform/broadcom/sonic-platform-modules-dell/debian/platform-modules-z9332f.install +++ b/platform/broadcom/sonic-platform-modules-dell/debian/platform-modules-z9332f.install @@ -4,7 +4,11 @@ z9332f/scripts/sensors usr/bin z9332f/cfg/z9332f-modules.conf etc/modules-load.d z9332f/systemd/platform-modules-z9332f.service etc/systemd/system z9332f/modules/sonic_platform-1.0-py3-none-any.whl usr/share/sonic/device/x86_64-dellemc_z9332f_d1508-r0 -common/platform_reboot usr/share/sonic/device/x86_64-dellemc_z9332f_d1508-r0 +z9332f/scripts/platform_reboot_override usr/share/sonic/device/x86_64-dellemc_z9332f_d1508-r0 +z9332f/scripts/override.conf /etc/systemd/system/systemd-reboot.service.d +z9332f/scripts/fast-reboot_plugin usr/share/sonic/device/x86_64-dellemc_z9332f_d1508-r0 +z9332f/scripts/soft-reboot_plugin usr/share/sonic/device/x86_64-dellemc_z9332f_d1508-r0 +z9332f/scripts/warm-reboot_plugin usr/share/sonic/device/x86_64-dellemc_z9332f_d1508-r0 common/pcisysfs.py usr/bin common/io_rd_wr.py usr/local/bin common/fw-updater usr/local/bin diff --git a/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/fast-reboot_plugin b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/fast-reboot_plugin new file mode 100755 index 000000000000..0335f71d02b5 --- /dev/null +++ b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/fast-reboot_plugin @@ -0,0 +1,8 @@ +#!/bin/bash + +ONIE_PATH="/mnt/onie-boot" + +# Unmount ONIE partition if mounted +if grep -qs ${ONIE_PATH} /proc/mounts; then + umount ${ONIE_PATH} +fi diff --git a/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/override.conf b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/override.conf new file mode 100644 index 000000000000..9f17da1c2335 --- /dev/null +++ b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/override.conf @@ -0,0 +1,3 @@ +[Service] +ExecStart= +ExecStart=/usr/share/sonic/device/x86_64-dellemc_z9332f_d1508-r0/platform_reboot_override diff --git a/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/platform_reboot_override b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/platform_reboot_override new file mode 100755 index 000000000000..ca04ac0635a7 --- /dev/null +++ 
b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/platform_reboot_override @@ -0,0 +1,23 @@ +#!/usr/bin/python3 +import os +import struct + +PORT_RES = '/dev/port' + + +def portio_reg_write(resource, offset, val): + fd = os.open(resource, os.O_RDWR) + if(fd < 0): + print('file open failed %s' % resource) + return + if(os.lseek(fd, offset, os.SEEK_SET) != offset): + print('lseek failed on %s' % resource) + return + ret = os.write(fd, struct.pack('B', val)) + if(ret != 1): + print('write failed %d' % ret) + return + os.close(fd) + +if __name__ == "__main__": + portio_reg_write(PORT_RES, 0xcf9, 0xe) diff --git a/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/soft-reboot_plugin b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/soft-reboot_plugin new file mode 120000 index 000000000000..180742bbc4d5 --- /dev/null +++ b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/soft-reboot_plugin @@ -0,0 +1 @@ +fast-reboot_plugin \ No newline at end of file diff --git a/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/warm-reboot_plugin b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/warm-reboot_plugin new file mode 120000 index 000000000000..180742bbc4d5 --- /dev/null +++ b/platform/broadcom/sonic-platform-modules-dell/z9332f/scripts/warm-reboot_plugin @@ -0,0 +1 @@ +fast-reboot_plugin \ No newline at end of file From 1e75abc274e019d12496ffd8c9e16a9600245ec6 Mon Sep 17 00:00:00 2001 From: saksarav-nokia Date: Tue, 30 Aug 2022 23:23:17 -0400 Subject: [PATCH 08/52] [Nokia][Nokia-IXR7250E-36x100G & Nokia-IXR7250E-36x400G] Update BCM (#11577) config to support ERSPAN egress mirror and also set flag to preserve ECN --- .../0/jr2cp-nokia-18x100g-4x25g-config.bcm | 44 +++++++++++++++++-- .../0/sai_postinit_cmd.soc | 37 +--------------- .../1/jr2cp-nokia-18x100g-4x25g-config.bcm | 44 +++++++++++++++++-- .../1/sai_postinit_cmd.soc | 7 +-- .../0/jr2cp-nokia-18x400g-config.bcm | 43 ++++++++++++++++-- .../0/sai_postinit_cmd.soc | 2 + .../1/jr2cp-nokia-18x400g-config.bcm | 43 ++++++++++++++++-- .../1/sai_postinit_cmd.soc | 2 + 8 files changed, 168 insertions(+), 54 deletions(-) diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm index 72c101facb35..3eb3ba20a424 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/jr2cp-nokia-18x100g-4x25g-config.bcm @@ -8,7 +8,7 @@ dma_desc_aggregator_chain_length_max.BCM8885X=1000 dma_desc_aggregator_enable_specific_MDB_LPM.BCM8885X=1 dma_desc_aggregator_timeout_usec.BCM8885X=1000 dport_map_direct.BCM8885X=1 - +sai_postinit_cmd_file=/usr/share/sonic/hwsku/sai_postinit_cmd.soc dtm_flow_mapping_mode_region_64.BCM8885X=3 dtm_flow_mapping_mode_region_65.BCM8885X=3 dtm_flow_mapping_mode_region_66.BCM8885X=3 @@ -1532,12 +1532,50 @@ ucode_port_15.BCM8885X=CGE6:core_0.15 ucode_port_16.BCM8885X=CGE4:core_0.16 ucode_port_17.BCM8885X=CGE2:core_0.17 ucode_port_18.BCM8885X=CGE0:core_0.18 - - ucode_port_19.BCM8885X=RCY0:core_0.19 ucode_port_20.BCM8885X=RCY1:core_1.20 ucode_port_21.BCM8885X=OLP:core_1.21 +ucode_port_100.BCM8885X=RCY_MIRROR.0:core_0.100 +ucode_port_101.BCM8885X=RCY_MIRROR.1:core_0.101 +ucode_port_102.BCM8885X=RCY_MIRROR.2:core_0.102 +ucode_port_103.BCM8885X=RCY_MIRROR.3:core_0.103 
+ucode_port_104.BCM8885X=RCY_MIRROR.4:core_0.104 +ucode_port_105.BCM8885X=RCY_MIRROR.5:core_0.105 +ucode_port_106.BCM8885X=RCY_MIRROR.6:core_0.106 +ucode_port_107.BCM8885X=RCY_MIRROR.7:core_0.107 +ucode_port_108.BCM8885X=RCY_MIRROR.8:core_0.108 +ucode_port_109.BCM8885X=RCY_MIRROR.9:core_0.109 +ucode_port_110.BCM8885X=RCY_MIRROR.10:core_0.110 +ucode_port_111.BCM8885X=RCY_MIRROR.11:core_0.111 +ucode_port_112.BCM8885X=RCY_MIRROR.12:core_0.112 +ucode_port_113.BCM8885X=RCY_MIRROR.13:core_0.113 +ucode_port_114.BCM8885X=RCY_MIRROR.14:core_0.114 +ucode_port_115.BCM8885X=RCY_MIRROR.15:core_0.115 +ucode_port_116.BCM8885X=RCY_MIRROR.16:core_0.116 +ucode_port_117.BCM8885X=RCY_MIRROR.17:core_0.117 +ucode_port_118.BCM8885X=RCY_MIRROR.18:core_0.118 +ucode_port_119.BCM8885X=RCY_MIRROR.19:core_0.119 +ucode_port_120.BCM8885X=RCY_MIRROR.0:core_1.120 +ucode_port_121.BCM8885X=RCY_MIRROR.1:core_1.121 +ucode_port_122.BCM8885X=RCY_MIRROR.2:core_1.122 +ucode_port_123.BCM8885X=RCY_MIRROR.3:core_1.123 +ucode_port_124.BCM8885X=RCY_MIRROR.4:core_1.124 +ucode_port_125.BCM8885X=RCY_MIRROR.5:core_1.125 +ucode_port_126.BCM8885X=RCY_MIRROR.6:core_1.126 +ucode_port_127.BCM8885X=RCY_MIRROR.7:core_1.127 +ucode_port_128.BCM8885X=RCY_MIRROR.8:core_1.128 +ucode_port_129.BCM8885X=RCY_MIRROR.9:core_1.129 +ucode_port_130.BCM8885X=RCY_MIRROR.10:core_1.130 +ucode_port_131.BCM8885X=RCY_MIRROR.11:core_1.131 +ucode_port_132.BCM8885X=RCY_MIRROR.12:core_1.132 +ucode_port_133.BCM8885X=RCY_MIRROR.13:core_1.133 +ucode_port_134.BCM8885X=RCY_MIRROR.14:core_1.134 +ucode_port_135.BCM8885X=RCY_MIRROR.15:core_1.135 +ucode_port_136.BCM8885X=RCY_MIRROR.16:core_1.136 +ucode_port_137.BCM8885X=RCY_MIRROR.17:core_1.137 +ucode_port_138.BCM8885X=RCY_MIRROR.18:core_1.138 +ucode_port_139.BCM8885X=RCY_MIRROR.19:core_1.139 port_init_speed_1.BCM8885X=100000 port_init_speed_2.BCM8885X=100000 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/sai_postinit_cmd.soc b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/sai_postinit_cmd.soc index 26466f89ae44..20e19b8faebe 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/sai_postinit_cmd.soc +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/0/sai_postinit_cmd.soc @@ -1,36 +1 @@ -phy set 3 reg=0xd134 data=-8 lane=0 -phy set 3 reg=0xd135 data=132 lane=0 -phy set 3 reg=0xd136 data=-8 lane=0 -phy set 3 reg=0xd137 data=0 lane=0 -phy set 3 reg=0xd138 data=0 lane=0 -phy set 3 reg=0xd133 data=0x1802 lane=0 - -phy set 3 reg=0xd134 data=-8 lane=1 -phy set 3 reg=0xd135 data=132 lane=1 -phy set 3 reg=0xd136 data=-12 lane=1 -phy set 3 reg=0xd137 data=0 lane=1 -phy set 3 reg=0xd138 data=0 lane=1 -phy set 3 reg=0xd133 data=0x1800 lane=1 - -phy set 3 reg=0xd134 data=-8 lane=7 -phy set 3 reg=0xd135 data=132 lane=7 -phy set 3 reg=0xd136 data=-8 lane=7 -phy set 3 reg=0xd137 data=0 lane=7 -phy set 3 reg=0xd138 data=0 lane=7 -phy set 3 reg=0xd133 data=0x1804 lane=7 - - -phy set 6 reg=0xd134 data=-8 lane=1 -phy set 6 reg=0xd135 data=132 lane=1 -phy set 6 reg=0xd136 data=-8 lane=1 -phy set 6 reg=0xd137 data=0 lane=1 -phy set 6 reg=0xd138 data=0 lane=1 -phy set 6 reg=0xd133 data=0x1802 lane=1 - - -phy set 8 reg=0xd134 data=-8 lane=1 -phy set 8 reg=0xd135 data=132 lane=1 -phy set 8 reg=0xd136 data=-8 lane=1 -phy set 8 reg=0xd137 data=0 lane=1 -phy set 8 reg=0xd138 data=0 lane=1 -phy set 8 reg=0xd133 data=0x1802 lane=1 +mod ETPPC_MAP_FWD_QOS_DP_TO_TYPE_FWD 0 128 TYPE_FWD_KEEP_ECN_BITS=1 diff --git 
a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm index cc6d41ea3da6..57e966b35315 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/jr2cp-nokia-18x100g-4x25g-config.bcm @@ -8,7 +8,7 @@ dma_desc_aggregator_chain_length_max.BCM8885X=1000 dma_desc_aggregator_enable_specific_MDB_LPM.BCM8885X=1 dma_desc_aggregator_timeout_usec.BCM8885X=1000 dport_map_direct.BCM8885X=1 - +sai_postinit_cmd_file=/usr/share/sonic/hwsku/sai_postinit_cmd.soc dtm_flow_mapping_mode_region_64.BCM8885X=3 dtm_flow_mapping_mode_region_65.BCM8885X=3 dtm_flow_mapping_mode_region_66.BCM8885X=3 @@ -1551,12 +1551,50 @@ ucode_port_15.BCM8885X=CGE6:core_0.15 ucode_port_16.BCM8885X=CGE4:core_0.16 ucode_port_17.BCM8885X=CGE2:core_0.17 ucode_port_18.BCM8885X=CGE0:core_0.18 - - ucode_port_19.BCM8885X=RCY0:core_0.19 ucode_port_20.BCM8885X=RCY1:core_1.20 ucode_port_21.BCM8885X=OLP:core_1.21 +ucode_port_100.BCM8885X=RCY_MIRROR.0:core_0.100 +ucode_port_101.BCM8885X=RCY_MIRROR.1:core_0.101 +ucode_port_102.BCM8885X=RCY_MIRROR.2:core_0.102 +ucode_port_103.BCM8885X=RCY_MIRROR.3:core_0.103 +ucode_port_104.BCM8885X=RCY_MIRROR.4:core_0.104 +ucode_port_105.BCM8885X=RCY_MIRROR.5:core_0.105 +ucode_port_106.BCM8885X=RCY_MIRROR.6:core_0.106 +ucode_port_107.BCM8885X=RCY_MIRROR.7:core_0.107 +ucode_port_108.BCM8885X=RCY_MIRROR.8:core_0.108 +ucode_port_109.BCM8885X=RCY_MIRROR.9:core_0.109 +ucode_port_110.BCM8885X=RCY_MIRROR.10:core_0.110 +ucode_port_111.BCM8885X=RCY_MIRROR.11:core_0.111 +ucode_port_112.BCM8885X=RCY_MIRROR.12:core_0.112 +ucode_port_113.BCM8885X=RCY_MIRROR.13:core_0.113 +ucode_port_114.BCM8885X=RCY_MIRROR.14:core_0.114 +ucode_port_115.BCM8885X=RCY_MIRROR.15:core_0.115 +ucode_port_116.BCM8885X=RCY_MIRROR.16:core_0.116 +ucode_port_117.BCM8885X=RCY_MIRROR.17:core_0.117 +ucode_port_118.BCM8885X=RCY_MIRROR.18:core_0.118 +ucode_port_119.BCM8885X=RCY_MIRROR.19:core_0.119 +ucode_port_120.BCM8885X=RCY_MIRROR.0:core_1.120 +ucode_port_121.BCM8885X=RCY_MIRROR.1:core_1.121 +ucode_port_122.BCM8885X=RCY_MIRROR.2:core_1.122 +ucode_port_123.BCM8885X=RCY_MIRROR.3:core_1.123 +ucode_port_124.BCM8885X=RCY_MIRROR.4:core_1.124 +ucode_port_125.BCM8885X=RCY_MIRROR.5:core_1.125 +ucode_port_126.BCM8885X=RCY_MIRROR.6:core_1.126 +ucode_port_127.BCM8885X=RCY_MIRROR.7:core_1.127 +ucode_port_128.BCM8885X=RCY_MIRROR.8:core_1.128 +ucode_port_129.BCM8885X=RCY_MIRROR.9:core_1.129 +ucode_port_130.BCM8885X=RCY_MIRROR.10:core_1.130 +ucode_port_131.BCM8885X=RCY_MIRROR.11:core_1.131 +ucode_port_132.BCM8885X=RCY_MIRROR.12:core_1.132 +ucode_port_133.BCM8885X=RCY_MIRROR.13:core_1.133 +ucode_port_134.BCM8885X=RCY_MIRROR.14:core_1.134 +ucode_port_135.BCM8885X=RCY_MIRROR.15:core_1.135 +ucode_port_136.BCM8885X=RCY_MIRROR.16:core_1.136 +ucode_port_137.BCM8885X=RCY_MIRROR.17:core_1.137 +ucode_port_138.BCM8885X=RCY_MIRROR.18:core_1.138 +ucode_port_139.BCM8885X=RCY_MIRROR.19:core_1.139 port_init_speed_1.BCM8885X=100000 port_init_speed_2.BCM8885X=100000 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/sai_postinit_cmd.soc b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/sai_postinit_cmd.soc index db5ad5ebb264..20e19b8faebe 100644 --- 
a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/sai_postinit_cmd.soc +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x100G/1/sai_postinit_cmd.soc @@ -1,6 +1 @@ -phy set 8 reg=0xd134 data=-8 lane=1 -phy set 8 reg=0xd135 data=132 lane=1 -phy set 8 reg=0xd136 data=-8 lane=1 -phy set 8 reg=0xd137 data=0 lane=1 -phy set 8 reg=0xd138 data=0 lane=1 -phy set 8 reg=0xd133 data=0x1802 lane=1 +mod ETPPC_MAP_FWD_QOS_DP_TO_TYPE_FWD 0 128 TYPE_FWD_KEEP_ECN_BITS=1 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm index 5303bb263ba2..1da65733155a 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/jr2cp-nokia-18x400g-config.bcm @@ -1551,13 +1551,50 @@ ucode_port_15.BCM8885X=CDGE3:core_0.15 ucode_port_16.BCM8885X=CDGE2:core_0.16 ucode_port_17.BCM8885X=CDGE1:core_0.17 ucode_port_18.BCM8885X=CDGE0:core_0.18 - - ucode_port_19.BCM8885X=RCY0:core_0.19 ucode_port_20.BCM8885X=RCY1:core_1.20 ucode_port_21.BCM8885X=OLP:core_1.21 - +ucode_port_100.BCM8885X=RCY_MIRROR.0:core_0.100 +ucode_port_101.BCM8885X=RCY_MIRROR.1:core_0.101 +ucode_port_102.BCM8885X=RCY_MIRROR.2:core_0.102 +ucode_port_103.BCM8885X=RCY_MIRROR.3:core_0.103 +ucode_port_104.BCM8885X=RCY_MIRROR.4:core_0.104 +ucode_port_105.BCM8885X=RCY_MIRROR.5:core_0.105 +ucode_port_106.BCM8885X=RCY_MIRROR.6:core_0.106 +ucode_port_107.BCM8885X=RCY_MIRROR.7:core_0.107 +ucode_port_108.BCM8885X=RCY_MIRROR.8:core_0.108 +ucode_port_109.BCM8885X=RCY_MIRROR.9:core_0.109 +ucode_port_110.BCM8885X=RCY_MIRROR.10:core_0.110 +ucode_port_111.BCM8885X=RCY_MIRROR.11:core_0.111 +ucode_port_112.BCM8885X=RCY_MIRROR.12:core_0.112 +ucode_port_113.BCM8885X=RCY_MIRROR.13:core_0.113 +ucode_port_114.BCM8885X=RCY_MIRROR.14:core_0.114 +ucode_port_115.BCM8885X=RCY_MIRROR.15:core_0.115 +ucode_port_116.BCM8885X=RCY_MIRROR.16:core_0.116 +ucode_port_117.BCM8885X=RCY_MIRROR.17:core_0.117 +ucode_port_118.BCM8885X=RCY_MIRROR.18:core_0.118 +ucode_port_119.BCM8885X=RCY_MIRROR.19:core_0.119 +ucode_port_120.BCM8885X=RCY_MIRROR.0:core_1.120 +ucode_port_121.BCM8885X=RCY_MIRROR.1:core_1.121 +ucode_port_122.BCM8885X=RCY_MIRROR.2:core_1.122 +ucode_port_123.BCM8885X=RCY_MIRROR.3:core_1.123 +ucode_port_124.BCM8885X=RCY_MIRROR.4:core_1.124 +ucode_port_125.BCM8885X=RCY_MIRROR.5:core_1.125 +ucode_port_126.BCM8885X=RCY_MIRROR.6:core_1.126 +ucode_port_127.BCM8885X=RCY_MIRROR.7:core_1.127 +ucode_port_128.BCM8885X=RCY_MIRROR.8:core_1.128 +ucode_port_129.BCM8885X=RCY_MIRROR.9:core_1.129 +ucode_port_130.BCM8885X=RCY_MIRROR.10:core_1.130 +ucode_port_131.BCM8885X=RCY_MIRROR.11:core_1.131 +ucode_port_132.BCM8885X=RCY_MIRROR.12:core_1.132 +ucode_port_133.BCM8885X=RCY_MIRROR.13:core_1.133 +ucode_port_134.BCM8885X=RCY_MIRROR.14:core_1.134 +ucode_port_135.BCM8885X=RCY_MIRROR.15:core_1.135 +ucode_port_136.BCM8885X=RCY_MIRROR.16:core_1.136 +ucode_port_137.BCM8885X=RCY_MIRROR.17:core_1.137 +ucode_port_138.BCM8885X=RCY_MIRROR.18:core_1.138 +ucode_port_139.BCM8885X=RCY_MIRROR.19:core_1.139 serdes_lane_config_dfe_1.BCM8885X=on serdes_lane_config_dfe_2.BCM8885X=on diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/sai_postinit_cmd.soc b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/sai_postinit_cmd.soc index 
650134e7e589..fd18216d3c84 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/sai_postinit_cmd.soc +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/0/sai_postinit_cmd.soc @@ -35,3 +35,5 @@ phy set 17 reg=0xd136 data=-16 lane=2 phy set 17 reg=0xd137 data=0 lane=2 phy set 17 reg=0xd138 data=0 lane=2 phy set 17 reg=0xd133 data=0x1804 lane=2 + +mod ETPPC_MAP_FWD_QOS_DP_TO_TYPE_FWD 0 128 TYPE_FWD_KEEP_ECN_BITS=1 diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm index eb58c1ca42e1..4d6790d5398b 100644 --- a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/jr2cp-nokia-18x400g-config.bcm @@ -1551,13 +1551,50 @@ ucode_port_15.BCM8885X=CDGE3:core_0.15 ucode_port_16.BCM8885X=CDGE2:core_0.16 ucode_port_17.BCM8885X=CDGE1:core_0.17 ucode_port_18.BCM8885X=CDGE0:core_0.18 - - ucode_port_19.BCM8885X=RCY0:core_0.19 ucode_port_20.BCM8885X=RCY1:core_1.20 ucode_port_21.BCM8885X=OLP:core_1.21 - +ucode_port_100.BCM8885X=RCY_MIRROR.0:core_0.100 +ucode_port_101.BCM8885X=RCY_MIRROR.1:core_0.101 +ucode_port_102.BCM8885X=RCY_MIRROR.2:core_0.102 +ucode_port_103.BCM8885X=RCY_MIRROR.3:core_0.103 +ucode_port_104.BCM8885X=RCY_MIRROR.4:core_0.104 +ucode_port_105.BCM8885X=RCY_MIRROR.5:core_0.105 +ucode_port_106.BCM8885X=RCY_MIRROR.6:core_0.106 +ucode_port_107.BCM8885X=RCY_MIRROR.7:core_0.107 +ucode_port_108.BCM8885X=RCY_MIRROR.8:core_0.108 +ucode_port_109.BCM8885X=RCY_MIRROR.9:core_0.109 +ucode_port_110.BCM8885X=RCY_MIRROR.10:core_0.110 +ucode_port_111.BCM8885X=RCY_MIRROR.11:core_0.111 +ucode_port_112.BCM8885X=RCY_MIRROR.12:core_0.112 +ucode_port_113.BCM8885X=RCY_MIRROR.13:core_0.113 +ucode_port_114.BCM8885X=RCY_MIRROR.14:core_0.114 +ucode_port_115.BCM8885X=RCY_MIRROR.15:core_0.115 +ucode_port_116.BCM8885X=RCY_MIRROR.16:core_0.116 +ucode_port_117.BCM8885X=RCY_MIRROR.17:core_0.117 +ucode_port_118.BCM8885X=RCY_MIRROR.18:core_0.118 +ucode_port_119.BCM8885X=RCY_MIRROR.19:core_0.119 +ucode_port_120.BCM8885X=RCY_MIRROR.0:core_1.120 +ucode_port_121.BCM8885X=RCY_MIRROR.1:core_1.121 +ucode_port_122.BCM8885X=RCY_MIRROR.2:core_1.122 +ucode_port_123.BCM8885X=RCY_MIRROR.3:core_1.123 +ucode_port_124.BCM8885X=RCY_MIRROR.4:core_1.124 +ucode_port_125.BCM8885X=RCY_MIRROR.5:core_1.125 +ucode_port_126.BCM8885X=RCY_MIRROR.6:core_1.126 +ucode_port_127.BCM8885X=RCY_MIRROR.7:core_1.127 +ucode_port_128.BCM8885X=RCY_MIRROR.8:core_1.128 +ucode_port_129.BCM8885X=RCY_MIRROR.9:core_1.129 +ucode_port_130.BCM8885X=RCY_MIRROR.10:core_1.130 +ucode_port_131.BCM8885X=RCY_MIRROR.11:core_1.131 +ucode_port_132.BCM8885X=RCY_MIRROR.12:core_1.132 +ucode_port_133.BCM8885X=RCY_MIRROR.13:core_1.133 +ucode_port_134.BCM8885X=RCY_MIRROR.14:core_1.134 +ucode_port_135.BCM8885X=RCY_MIRROR.15:core_1.135 +ucode_port_136.BCM8885X=RCY_MIRROR.16:core_1.136 +ucode_port_137.BCM8885X=RCY_MIRROR.17:core_1.137 +ucode_port_138.BCM8885X=RCY_MIRROR.18:core_1.138 +ucode_port_139.BCM8885X=RCY_MIRROR.19:core_1.139 serdes_lane_config_dfe_1.BCM8885X=on serdes_lane_config_dfe_2.BCM8885X=on diff --git a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/sai_postinit_cmd.soc b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/sai_postinit_cmd.soc index b22dde093132..109b18ecaaf2 100644 --- 
a/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/sai_postinit_cmd.soc +++ b/device/nokia/x86_64-nokia_ixr7250e_36x400g-r0/Nokia-IXR7250E-36x400G/1/sai_postinit_cmd.soc @@ -12,3 +12,5 @@ phy set 8 reg=0xd136 data=-8 lane=1 phy set 8 reg=0xd137 data=0 lane=1 phy set 8 reg=0xd138 data=0 lane=1 phy set 8 reg=0xd133 data=0x1802 lane=1 + +mod ETPPC_MAP_FWD_QOS_DP_TO_TYPE_FWD 0 128 TYPE_FWD_KEEP_ECN_BITS=1 From 402714723805baff2d49c053a9513a1ab6519f1e Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Wed, 31 Aug 2022 10:48:15 -0700 Subject: [PATCH 09/52] Align API get_device_runtime_metadata() for python version < 3.9 (#11900) Why I did it: API get_device_runtime_metadata() added by #11795 uses the dict merge operator, but that is supported only for Python versions >= 3.9. This API will be used by scripts, e.g. hostcfgd, which is still built for buster, which does not have Python 3.9 support. --- src/sonic-py-common/sonic_py_common/device_info.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/sonic-py-common/sonic_py_common/device_info.py b/src/sonic-py-common/sonic_py_common/device_info.py index 6605c798ec1a..8173c2677275 100644 --- a/src/sonic-py-common/sonic_py_common/device_info.py +++ b/src/sonic-py-common/sonic_py_common/device_info.py @@ -476,7 +476,10 @@ def get_device_runtime_metadata(): 'chassis_type': 'voq' if is_voq_chassis() else 'packet'}} port_metadata = {'ETHERNET_PORTS_PRESENT': True if get_path_to_port_config_file(hwsku=None, asic="0" if is_multi_npu() else None) else False} - return {'DEVICE_RUNTIME_METADATA': chassis_metadata | port_metadata } + runtime_metadata = {} + runtime_metadata.update(chassis_metadata) + runtime_metadata.update(port_metadata) + return {'DEVICE_RUNTIME_METADATA': runtime_metadata } def get_npu_id_from_name(npu_name): if npu_name.startswith(NPU_NAME_PREFIX): From c601f241396bc4f4f23082fc50311fd5f69fe1a8 Mon Sep 17 00:00:00 2001 From: Dev Ojha <47282568+developfast@users.noreply.github.com> Date: Wed, 31 Aug 2022 11:08:32 -0700 Subject: [PATCH 10/52] [Arista7050cx3] TD3 SKU changes for pg headroom value after interop testing with cisco 8102 (#11901) Why I did it After PFC interop testing between the 8102 and the 7050cx3, data packet losses were observed on the Rx ports of the 7050cx3 (inflow from the 8102) during testing. This was primarily due to the 8102's slower response times in reacting to PFC pause packets received from neighboring devices. To solve the packet drops, the 7050cx3 pg headroom size has to be increased to 160kB. How I did it Modified the xoff threshold value to 160kB in the pg_profile file so that the buffer manager reads that value when building the image and configuring the device. How to verify it Run "mmuconfig -l" once the image is built. Signed-off-by: dojha --- .../Arista-7050CX3-32S-C32/pg_profile_lookup.ini | 12 ++++++------ .../Arista-7050CX3-32S-D48C8/pg_profile_lookup.ini | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-C32/pg_profile_lookup.ini b/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-C32/pg_profile_lookup.ini index dd405301f720..5b4482bc74c7 100644 --- a/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-C32/pg_profile_lookup.ini +++ b/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-C32/pg_profile_lookup.ini @@ -1,8 +1,8 @@ # PG lossless profiles.
# speed cable size xon xoff threshold xon_offset - 50000 5m 4608 4608 33792 0 4608 - 100000 5m 4608 4608 49408 0 4608 - 50000 40m 4608 4608 36352 0 4608 - 100000 40m 4608 4608 54528 0 4608 - 50000 300m 4608 4608 55296 0 4608 - 100000 300m 4608 4608 92672 0 4608 + 50000 5m 4608 4608 160000 0 4608 + 100000 5m 4608 4608 160000 0 4608 + 50000 40m 4608 4608 160000 0 4608 + 100000 40m 4608 4608 160000 0 4608 + 50000 300m 4608 4608 160000 0 4608 + 100000 300m 4608 4608 160000 0 4608 diff --git a/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/pg_profile_lookup.ini b/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/pg_profile_lookup.ini index 8ee7a6714b1e..5b4482bc74c7 100644 --- a/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/pg_profile_lookup.ini +++ b/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/pg_profile_lookup.ini @@ -1,8 +1,8 @@ # PG lossless profiles. # speed cable size xon xoff threshold xon_offset - 50000 5m 4608 4608 79872 0 4608 - 100000 5m 4608 4608 54528 0 4608 - 50000 40m 4608 4608 39936 0 4608 - 100000 40m 4608 4608 60416 0 4608 - 50000 300m 4608 4608 61440 0 4608 - 100000 300m 4608 4608 103680 0 4608 + 50000 5m 4608 4608 160000 0 4608 + 100000 5m 4608 4608 160000 0 4608 + 50000 40m 4608 4608 160000 0 4608 + 100000 40m 4608 4608 160000 0 4608 + 50000 300m 4608 4608 160000 0 4608 + 100000 300m 4608 4608 160000 0 4608 From 46292d71bee9378460ffed833867bf12ae10090b Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Thu, 1 Sep 2022 04:09:36 +0800 Subject: [PATCH 11/52] Add linux perf tool to sonic image (#11906) --- build_debian.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build_debian.sh b/build_debian.sh index 1db8f3b91ca1..8dcd8596f684 100755 --- a/build_debian.sh +++ b/build_debian.sh @@ -385,7 +385,8 @@ sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y in fdisk \ gpg \ jq \ - auditd + auditd \ + linux-perf # default rsyslog version is 8.2110.0 which has a bug on log rate limit, # use backport version From 6e878a36da87082788ee737a624ff66d99c86167 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Thu, 1 Sep 2022 04:10:22 +0800 Subject: [PATCH 12/52] [mux] Exit to write `standby` state to `active-active` ports (#11821) [mux] Exit to write standby state to `active-active` ports Signed-off-by: Longxiang Lyu --- files/build_templates/mux.service.j2 | 2 +- files/scripts/write_standby.py | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/files/build_templates/mux.service.j2 b/files/build_templates/mux.service.j2 index abc04ab30d18..ee1f8ce75389 100644 --- a/files/build_templates/mux.service.j2 +++ b/files/build_templates/mux.service.j2 @@ -14,7 +14,7 @@ ExecStartPre=/usr/local/bin/mark_dhcp_packet.py ExecStartPre=/usr/bin/{{docker_container_name}}.sh start ExecStart=/usr/bin/{{docker_container_name}}.sh wait ExecStop=/usr/bin/{{docker_container_name}}.sh stop -ExecStopPost=/usr/local/bin/write_standby.py +ExecStopPost=/usr/local/bin/write_standby.py --shutdown mux Restart=always RestartSec=30 diff --git a/files/scripts/write_standby.py b/files/scripts/write_standby.py index 86d50737b1c5..85b6ee8b04e8 100755 --- a/files/scripts/write_standby.py +++ b/files/scripts/write_standby.py @@ -178,6 +178,12 @@ def apply_mux_config(self): parser.add_argument('-s', '--active_standby', help='state: intial state for "auto" and/or "manual" config in 
active-standby mode, default "standby"', type=str, required=False, default='standby') + parser.add_argument('--shutdown', help='write mux state after shutdown other services, supported: mux', + type=str, required=False, choices=['mux']) args = parser.parse_args() - mux_writer = MuxStateWriter(activeactive=args.active_active, activestandby=args.active_standby) + active_active_state = args.active_active + active_standby_state = args.active_standby + if args.shutdown == 'mux': + active_active_state = "standby" + mux_writer = MuxStateWriter(activeactive=active_active_state, activestandby=active_standby_state) mux_writer.apply_mux_config() From 353b2742b23f2529af9733ddca0c712c6f68a17b Mon Sep 17 00:00:00 2001 From: Jing Kan <672454911@qq.com> Date: Thu, 1 Sep 2022 07:58:16 +0800 Subject: [PATCH 13/52] [YANG] Create YANG Model for Console (#11806) How I did it Create YANG Model for SONiC console related features. How to verify it Add tests Signed-off-by: Jing Kan jika@microsoft.com --- src/sonic-yang-models/doc/Configuration.md | 24 +++++ src/sonic-yang-models/setup.py | 1 + .../tests/files/sample_config_db.json | 17 ++++ .../tests/yang_model_tests/tests/console.json | 40 +++++++++ .../tests_config/console.json | 88 +++++++++++++++++++ .../yang-models/sonic-console.yang | 76 ++++++++++++++++ 6 files changed, 246 insertions(+) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/console.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/console.json create mode 100644 src/sonic-yang-models/yang-models/sonic-console.yang diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index 2167cab027ed..118fca806d0d 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -19,6 +19,7 @@ Table of Contents * [Buffer port egress profile list](#buffer-port-egress-profile-list) * [Cable length](#cable-length) * [COPP_TABLE](#copp_table) + * [Console](#console) * [CRM](#crm) * [Data Plane L3 Interfaces](#data-plane-l3-interfaces) * [DEFAULT_LOSSLESS_BUFFER_PARAMETER](#DEFAULT_LOSSLESS_BUFFER_PARAMETER) @@ -687,6 +688,29 @@ This kind of profiles will be handled by buffer manager and won't be applied to } ``` +### Console + +``` +{ +"CONSOLE_PORT": { + "1": { + "baud_rate": "115200", + "flow_control": "0", + "remote_device": "host-1" + }, + "2": { + "baud_rate": "9600", + "flow_control": "1" + } + }, +"CONSOLE_SWITCH": { + "console_mgmt": { + "enabled": "yes" + } + } +} +``` + ### CRM ``` diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index f5ff5b5fe5f2..790143053260 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -98,6 +98,7 @@ def run(self): './yang-models/sonic-buffer-queue.yang', './yang-models/sonic-cable-length.yang', './yang-models/sonic-copp.yang', + './yang-models/sonic-console.yang', './yang-models/sonic-crm.yang', './yang-models/sonic-default-lossless-buffer-parameter.yang', './yang-models/sonic-device_metadata.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index c7d199fa9f24..44724f00d5d6 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -1964,6 +1964,23 @@ "default_dynamic_th": "0", "over_subscribe_ratio": "0" } + }, + + "CONSOLE_PORT": { + "1": { + "baud_rate": "115200", + "flow_control": "0", + "remote_device": "host-1" + }, 
+ "2": { + "baud_rate": "9600", + "flow_control": "1" + } + }, + "CONSOLE_SWITCH": { + "console_mgmt": { + "enabled": "yes" + } } }, "SAMPLE_CONFIG_DB_UNKNOWN": { diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/console.json b/src/sonic-yang-models/tests/yang_model_tests/tests/console.json new file mode 100644 index 000000000000..cd305e4a2086 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/console.json @@ -0,0 +1,40 @@ +{ + "CONSOLE_DISABLED": { + "desc": "Verifying CONSOLE_SWITCH configuration." + }, + "CONSOLE_DEFAULT_CONSOLE_MGMT": { + "desc": "CONSOLE_SWITCH default value for console_mgmt enabled field.", + "eStrKey": "Verify", + "verify": { + "xpath": "/sonic-console:sonic-console/CONSOLE_SWITCH/console_mgmt/enabled", + "key": "sonic-console:enabled", + "value": "no" + } + }, + "CONSOLE_DISABLED_INCORRECT_PATTERN": { + "desc": "CONSOLE_SWITCH configuration pattern failure.", + "eStrKey": "Pattern" + }, + "CONSOLE_PORT_DEFAULT_FLOW_CONTROL": { + "desc": "CONSOLE_PORT default value for flow_control field.", + "eStrKey": "Verify", + "verify": { + "xpath": "/sonic-console:sonic-console/CONSOLE_PORT/CONSOLE_PORT_LIST[name='1']/flow_control", + "key": "sonic-console:flow_control", + "value": "0" + } + }, + "CONSOLE_PORT_INVALID_NAME": { + "desc": "CONSOLE_PORT invalid name failure.", + "eStrKey": "InvalidValue", + "eStr": ["name"] + }, + "CONSOLE_PORT_INVALID_BAUD": { + "desc": "CONSOLE_PORT invalid baud failure.", + "eStrKey": "InvalidValue", + "eStr": ["baud"] + }, + "CONSOLE_PORT_VALID": { + "desc": "Verifying CONSOLE_PORT configuration no failure." + } +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/console.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/console.json new file mode 100644 index 000000000000..1ccfb4a3ae11 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/console.json @@ -0,0 +1,88 @@ +{ + "CONSOLE_DISABLED": { + "sonic-console:sonic-console": { + "sonic-console:CONSOLE_SWITCH": { + "sonic-console:console_mgmt": { + "enabled": "no" + } + } + } + }, + "CONSOLE_DEFAULT_CONSOLE_MGMT": { + "sonic-console:sonic-console": { + "sonic-console:CONSOLE_SWITCH": { + "sonic-console:console_mgmt": { + } + } + } + }, + "CONSOLE_DISABLED_INCORRECT_PATTERN": { + "sonic-console:sonic-console": { + "sonic-console:CONSOLE_SWITCH": { + "sonic-console:console_mgmt": { + "enabled": "false" + } + } + } + }, + "CONSOLE_PORT_DEFAULT_FLOW_CONTROL": { + "sonic-console:sonic-console": { + "sonic-console:CONSOLE_PORT": { + "CONSOLE_PORT_LIST": [ + { + "name": "1", + "baud_rate": "9600" + } + ] + } + } + }, + "CONSOLE_PORT_INVALID_NAME": { + "sonic-console:sonic-console": { + "sonic-console:CONSOLE_PORT": { + "CONSOLE_PORT_LIST": [ + { + "name": "invalid", + "baud_rate": "9600" + } + ] + } + } + }, + "CONSOLE_PORT_INVALID_BAUD": { + "sonic-console:sonic-console": { + "sonic-console:CONSOLE_PORT": { + "CONSOLE_PORT_LIST": [ + { + "name": "1", + "baud_rate": "invalid" + } + ] + } + } + }, + "CONSOLE_PORT_VALID": { + "sonic-console:sonic-console": { + "sonic-console:CONSOLE_PORT": { + "CONSOLE_PORT_LIST": [ + { + "name": "1", + "baud_rate": "9600", + "flow_control": "1", + "remote_device": "remote_host_1" + }, + { + "name": "2", + "baud_rate": "9600", + "flow_control": "0", + "remote_device": "remote_host_2" + }, + { + "name": "3", + "baud_rate": "9600" + } + ] + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-console.yang b/src/sonic-yang-models/yang-models/sonic-console.yang new 
file mode 100644 index 000000000000..ed0af5390f49 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-console.yang @@ -0,0 +1,76 @@ +module sonic-console { + yang-version 1.1; + namespace "http://github.com/Azure/sonic-console"; + prefix console; + + import sonic-types { + prefix stypes; + } + + description "SONiC CONSOLE"; + + revision 2022-08-22 { + description "First Revision"; + } + + typedef console-mgmt-enabled { + description "configuration to set if console switch is enabled or not"; + type string { + pattern "yes|no"; + } + } + + typedef console-flow-control { + description "configuration to set if enable flow control on a console port"; + type string { + pattern "0|1"; + } + } + + container sonic-console { + container CONSOLE_PORT { + description "CONSOLE_PORT part of config_db.json"; + + list CONSOLE_PORT_LIST { + key "name"; + + leaf name { + description "Configure console port name"; + type uint16; + } + + leaf baud_rate { + description "Configure baud rate"; + type uint32; + } + + leaf flow_control { + description "Configure if enable/disable flow control"; + type console-flow-control; + default "0"; + } + + leaf remote_device { + description "Configure the remote device name"; + type stypes:hostname; + } + } + + } /* end of container CONSOLE_PORT */ + + container CONSOLE_SWITCH { + description "CONSOLE_SWITCH part of config_db.json"; + + container console_mgmt { + leaf enabled { + description "This configuration indicate if enable console management feature on SONiC"; + type console-mgmt-enabled; + default "no"; + } + } + + } /* end of container CONSOLE_SWITCH */ + + } /* end of top level container */ + +} /* end of module sonic-console */ From 88191b063bd40e27c1a7ce817487a2a7cc4e70aa Mon Sep 17 00:00:00 2001 From: Zhaohui Sun <94606222+ZhaohuiS@users.noreply.github.com> Date: Thu, 1 Sep 2022 08:13:24 +0800 Subject: [PATCH 14/52] Add python-is-python3 package for bullseye base docker (#11895) Why I did it In latest syncd container, it is installed bullseye, can't find command '/usr/bin/python'. Some scripts such as test_copp still calls /usr/bin/python in syncd. Submitted the change in #11807 for syncd docker, but it's better to add it in bullseye base docker. How I did it Install python-is-python3 package in bullseye base docker to resolve this issue, whatever run python or python3, it will run /usr/bin/python3, will not cause the error of can't find command '/usr/bin/python' How to verify it run python in syncd container. Signed-off-by: Zhaohui Sun --- dockers/docker-base-bullseye/Dockerfile.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/dockers/docker-base-bullseye/Dockerfile.j2 b/dockers/docker-base-bullseye/Dockerfile.j2 index d08316dbb4df..8d197d3c9011 100644 --- a/dockers/docker-base-bullseye/Dockerfile.j2 +++ b/dockers/docker-base-bullseye/Dockerfile.j2 @@ -48,6 +48,7 @@ RUN apt-get update && \ python3 \ python3-distutils \ python3-pip \ + python-is-python3 \ vim-tiny \ # Install redis-tools redis-tools \ From fdd9130ecf45b15ec8675f6afddb74895fabc276 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Thu, 1 Sep 2022 11:52:02 -0700 Subject: [PATCH 15/52] [YANG] Add MUX_CABLE yang model (#11797) Why I did it Address issue #10970 sign-off: Jing Zhang zhangjing@microsoft.com How I did it Add sonic-mux-cable.yang and unit tests. How to verify it Compile Compile target/python-wheels/sonic_yang_mgmt-1.0-py3-none-any.whl and target/python-wheels/sonic_yang_models-1.0-py3-none-any.whl. Pass sonic-config-engine unit test. 
Which release branch to backport (provide reason below if selected) 201811 201911 202006 202012 202106 202111 202205 Description for the changelog Link to config_db schema for YANG module changes https://github.com/sonic-net/sonic-buildimage/blob/f8fe41a0238b8a7b9e32ae42262f41b63050c55f/src/sonic-yang-models/doc/Configuration.md#mux_cable --- src/sonic-yang-models/doc/Configuration.md | 21 ++++- src/sonic-yang-models/setup.py | 1 + .../tests/files/sample_config_db.json | 16 ++++ .../yang_model_tests/tests/mux_cable.json | 16 ++++ .../tests_config/mux_cable.json | 93 +++++++++++++++++++ .../yang-models/sonic-mux-cable.yang | 92 ++++++++++++++++++ 6 files changed, 238 insertions(+), 1 deletion(-) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/mux_cable.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/mux_cable.json create mode 100644 src/sonic-yang-models/yang-models/sonic-mux-cable.yang diff --git a/src/sonic-yang-models/doc/Configuration.md b/src/sonic-yang-models/doc/Configuration.md index 118fca806d0d..bd7b51ce5ae2 100644 --- a/src/sonic-yang-models/doc/Configuration.md +++ b/src/sonic-yang-models/doc/Configuration.md @@ -35,9 +35,10 @@ Table of Contents * [Management port](#management-port) * [Management VRF](#management-vrf) * [MAP_PFC_PRIORITY_TO_QUEUE](#map_pfc_priority_to_queue) + * [MUX_CABLE](#muxcable) * [NTP Global Configuration](#ntp-global-configuration) * [NTP and SYSLOG servers](#ntp-and-syslog-servers) - * [Peer Switch](#peer-switch) + * [Peer Switch](#peer-switch) * [Policer](#policer) * [Port](#port) * [Port Channel](#port-channel) @@ -1090,6 +1091,24 @@ instead of data network. } } ``` +### MUX_CABLE + +The **MUX_CABLE** table is used for dualtor interface configuration. The `cable_type` and `soc_ipv4` objects are optional. 
+ +``` +{ + "MUX_CABLE": { + "Ethernet4": { + "cable_type": "active-active", + "server_ipv4": "192.168.0.2/32", + "server_ipv6": "fc02:1000::30/128", + "soc_ipv4": "192.168.0.3/32", + "state": "auto" + } + } +} +``` + ### NTP Global Configuration These configuration options are used to modify the way that diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 790143053260..975b84b68245 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -116,6 +116,7 @@ def run(self): './yang-models/sonic-mgmt_port.yang', './yang-models/sonic-mgmt_vrf.yang', './yang-models/sonic-mirror-session.yang', + './yang-models/sonic-mux-cable.yang', './yang-models/sonic-ntp.yang', './yang-models/sonic-nat.yang', './yang-models/sonic-nvgre-tunnel.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index 44724f00d5d6..b833fab06ddd 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -1804,6 +1804,22 @@ } }, + "MUX_CABLE": { + "Ethernet4": { + "cable_type": "active-active", + "server_ipv4": "192.168.0.2/32", + "server_ipv6": "fc02:1000::30/128", + "soc_ipv4": "192.168.0.3/32", + "state": "auto" + }, + "Ethernet0": { + "server_ipv4": "192.168.0.2/32", + "server_ipv6": "fc02:1000::30/128", + "state": "auto" + } + }, + + "POLICER": { "everflow_static_policer": { "meter_type": "bytes", diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/mux_cable.json b/src/sonic-yang-models/tests/yang_model_tests/tests/mux_cable.json new file mode 100644 index 000000000000..0d02097fdf63 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/mux_cable.json @@ -0,0 +1,16 @@ +{ + "MUX_CABLE_ACTIVE_STANDBY_INTERFACE": { + "desc": "Load MUX_CABLE for active-standby interface." + }, + "MUX_CABLE_ACTIVE_ACTIVE_INTERFACE": { + "desc":"Load MUX_CABLE for active-active interface." 
+ }, + "MUX_CABLE_INVALID_STATE": { + "desc": "Load MUX_CABLE with invalid state.", + "eStrKey": "InvalidValue" + }, + "MUX_CABLE_INVALID_IP": { + "desc": "Load MUX_CABLE with invalid server ip address.", + "eStrKey": "Pattern" + } +} \ No newline at end of file diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/mux_cable.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/mux_cable.json new file mode 100644 index 000000000000..815171306bdc --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/mux_cable.json @@ -0,0 +1,93 @@ +{ + "MUX_CABLE_ACTIVE_STANDBY_INTERFACE": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "lanes": "65", + "mtu": "9000", + "name": "Ethernet0", + "tpid": "0x8100", + "speed": "25000" + } + ] + } + }, + "sonic-mux-cable:sonic-mux-cable": { + "sonic-mux-cable:MUX_CABLE": { + "MUX_CABLE_LIST": [ + { + "ifname": "Ethernet0", + "server_ipv4": "192.168.0.2/32", + "server_ipv6": "fc02:1000::30/128", + "state": "auto" + } + ] + + } + } + }, + + "MUX_CABLE_ACTIVE_ACTIVE_INTERFACE": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet4", + "lanes": "65", + "mtu": "9000", + "name": "Ethernet4", + "tpid": "0x8100", + "speed": "25000" + } + ] + } + }, + "sonic-mux-cable:sonic-mux-cable": { + "sonic-mux-cable:MUX_CABLE": { + "MUX_CABLE_LIST": [ + { + "ifname": "Ethernet4", + "cable_type": "active-active", + "server_ipv4": "192.168.0.2/32", + "server_ipv6": "fc02:1000::30/128", + "soc_ipv4": "192.168.0.3/32", + "state": "auto" + } + ] + + } + } + }, + + "MUX_CABLE_INVALID_STATE": { + "sonic-mux-cable:sonic-mux-cable": { + "sonic-mux-cable:MUX_CABLE": { + "MUX_CABLE_LIST": [ + { + "state": "Standby" + } + ] + + } + } + }, + + "MUX_CABLE_INVALID_IP": { + "sonic-mux-cable:sonic-mux-cable": { + "sonic-mux-cable:MUX_CABLE": { + "MUX_CABLE_LIST": [ + { + "server_ipv4": "999.999.999.999/32" + } + ] + + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-mux-cable.yang b/src/sonic-yang-models/yang-models/sonic-mux-cable.yang new file mode 100644 index 000000000000..a66a588c91da --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-mux-cable.yang @@ -0,0 +1,92 @@ +module sonic-mux-cable { + namespace "http://github.com/Azure/sonic-mux-cable"; + prefix mux_cable; + yang-version 1.1; + + import ietf-inet-types { + prefix inet; + } + + import sonic-port { + prefix prt; + } + + organization + "SONiC"; + + contact + "SONiC"; + + description + "SONiC DualToR MUX CABLE confifuration data"; + + revision 2022-08-19 { + description + "Initial revision"; + } + + container sonic-mux-cable { + + container MUX_CABLE { + + list MUX_CABLE_LIST { + + key "ifname"; + + leaf ifname { + type leafref { + path "/prt:sonic-port/prt:PORT/prt:PORT_LIST/prt:name"; + } + description + "Reference of port on which MUX cable to be configured."; + } + + leaf cable_type { + type enumeration { + enum active-active; + enum active-standby; + } + default active-standby; + description "SONiC DualToR interface cable type."; + } + + leaf server_ipv4 { + type inet:ipv4-prefix; + + description "Server IPv4 Address."; + } + + leaf server_ipv6 { + type inet:ipv6-prefix; + + description "Server IPv6 Address."; + } + + leaf soc_ipv4 { + type inet:ipv4-prefix; + + description "SoC IPv4 address. Optional and for active-active ports only. 
"; + } + + leaf soc_ipv6 { + type inet:ipv6-prefix; + + description "SoC IPv6 address. Optional and for active-active ports only. "; + } + + leaf state { + type enumeration { + enum auto; + enum manual; + enum detach; + enum active; + enum standby; + } + + default auto; + description "MUX mode determining if auto failover is enabled. "; + } + } + } + } +} From f82c1fd8ae992ffd4dd9791f064f91ea499026eb Mon Sep 17 00:00:00 2001 From: arunlk-dell <83708154+arunlk-dell@users.noreply.github.com> Date: Fri, 2 Sep 2022 05:25:41 +0530 Subject: [PATCH 16/52] Z9432F kernel dependency of platform module (#11941) Why I did it Z9432F Update the kernel dependency of platform module How I did it Modified the kernel version to current latest 5.10.0-12-2 --- platform/broadcom/sonic-platform-modules-dell/debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/platform/broadcom/sonic-platform-modules-dell/debian/control b/platform/broadcom/sonic-platform-modules-dell/debian/control index 7821dbd4f21b..8e3db2f0fd7e 100644 --- a/platform/broadcom/sonic-platform-modules-dell/debian/control +++ b/platform/broadcom/sonic-platform-modules-dell/debian/control @@ -57,7 +57,7 @@ Description: kernel modules for platform devices such as fan, led, sfp Package: platform-modules-z9432f Architecture: amd64 -Depends: linux-image-5.10.0-8-2-amd64-unsigned +Depends: linux-image-5.10.0-12-2-amd64-unsigned Description: kernel modules for platform devices such as fan, led, sfp Package: platform-modules-n3248pxe From 030de9f26d8ebd09ca56d7aa5ab1ede0ec187607 Mon Sep 17 00:00:00 2001 From: Liu Shilong Date: Fri, 2 Sep 2022 14:07:48 +0800 Subject: [PATCH 17/52] [actions] Add github context env in label action. (#11926) --- .github/workflows/label.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml index 307cbd86f871..ec04157110ae 100644 --- a/.github/workflows/label.yml +++ b/.github/workflows/label.yml @@ -23,6 +23,8 @@ jobs: runs-on: ubuntu-latest steps: - name: approve + env: + GITHUB_CONTEXT: ${{ toJson(github) }} run: | set -e echo ${{ secrets.GITHUB_TOKEN }} | gh auth login --with-token From a762b35cbca847902410de13693f0e94edcbe8ee Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Fri, 2 Sep 2022 13:40:40 -0700 Subject: [PATCH 18/52] [arp_update]: Set failed IPv6 neighbors to incomplete (#11919) After pinging any failed IPv6 neighbor entries, set the remaining failed/incomplete entries to a permanent INCOMPLETE state. This manual setting to INCOMPLETE prevents these entries from automatically transitioning to FAILED state, and since they are now incomplete any subsequent NA messages for these neighbors is able to resolve the entry in the cache. 
Signed-off-by: Lawrence Lee --- files/scripts/arp_update | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/files/scripts/arp_update b/files/scripts/arp_update index df2758e187a7..5522ea46828a 100755 --- a/files/scripts/arp_update +++ b/files/scripts/arp_update @@ -78,6 +78,17 @@ while /bin/true; do eval `eval $ip6cmd` if [[ $SUBTYPE == "dualtor" ]]; then + # manually set any remaining FAILED/INCOMPLETE entries to permanently INCOMPLETE + # this prevents any remaining INCOMPLETE entries from automatically transitioning to FAILED + # once these entries are incomplete, any subsequent neighbor advertisement messages + # are able to resolve the entry + + # generates the following command for each failed or incomplete IPv6 neighbor + # ip neigh replace dev nud incomplete + neigh_replace_template="sed -e 's/^/ip neigh replace /' -e 's/,/ dev /' -e 's/$/ nud incomplete;/'" + ip_neigh_replace_cmd="ip -6 neigh show | grep -v fe80 | grep Vlan1000 | grep -E 'FAILED|INCOMPLETE' | cut -d ' ' -f 1,3 --output-delimiter=',' | $neigh_replace_template" + eval `eval $ip_neigh_replace_cmd` + # on dual ToR devices, try to resolve failed neighbor entries since # these entries will have tunnel routes installed, preventing normal # neighbor resolution (SWSS PR #2137) From a6843927d9b25ec32adf24841ff138da0d5efcbd Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Fri, 2 Sep 2022 13:50:42 -0700 Subject: [PATCH 19/52] [mux] skip mux operations during warm shutdown (#11937) * [mux] skip mux operations during warm shutdown - Enhance write_standby.py script to skip actions during warm shutdown. - Expand the support to BGP service. - MuX support was added by a previous PR. - don't skip action during warm recovery Signed-off-by: Ying Xie --- .../per_namespace/bgp.service.j2 | 2 +- files/scripts/write_standby.py | 24 +++++++++++++------ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/files/build_templates/per_namespace/bgp.service.j2 b/files/build_templates/per_namespace/bgp.service.j2 index 5ef30a164f69..0c9f01fe8b68 100644 --- a/files/build_templates/per_namespace/bgp.service.j2 +++ b/files/build_templates/per_namespace/bgp.service.j2 @@ -15,7 +15,7 @@ User={{ sonicadmin_user }} ExecStartPre=/usr/local/bin/{{docker_container_name}}.sh start{% if multi_instance == 'true' %} %i{% endif %} ExecStart=/usr/local/bin/{{docker_container_name}}.sh wait{% if multi_instance == 'true' %} %i{% endif %} ExecStop=/usr/local/bin/{{docker_container_name}}.sh stop{% if multi_instance == 'true' %} %i{% endif %} -ExecStopPost=/usr/local/bin/write_standby.py +ExecStopPost=/usr/local/bin/write_standby.py --shutdown bgp RestartSec=30 diff --git a/files/scripts/write_standby.py b/files/scripts/write_standby.py index 85b6ee8b04e8..13c5b17ea64e 100755 --- a/files/scripts/write_standby.py +++ b/files/scripts/write_standby.py @@ -20,13 +20,15 @@ class MuxStateWriter(object): Class used to write standby mux state to APP DB """ - def __init__(self, activeactive, activestandby): + def __init__(self, activeactive, activestandby, shutdown_module): self.config_db_connector = None self.appl_db_connector = None self.state_db_connector = None self.asic_db_connector = None self.default_active_active_state = activeactive self.default_active_standby_state = activestandby + self.shutdown_module = shutdown_module + self.is_shutdwon = (self.shutdown_module != None) @property def config_db(self): @@ -97,7 +99,15 @@ def is_warmrestart(self): tbl = Table(self.state_db, 'WARM_RESTART_ENABLE_TABLE') (status, value) = tbl.hget('system', 
'enable') - return status and value == 'true' + if status and value == 'true': + return True + + if self.shutdown_module: + (status, value) = tbl.hget(self.shutdown_module, 'enable') + if status and value == 'true': + return True + + return False def get_all_mux_intfs_modes(self): """ @@ -153,7 +163,7 @@ def apply_mux_config(self): # If not running on a dual ToR system, take no action return - if self.is_warmrestart: + if self.is_warmrestart and self.is_shutdwon: # If in warmrestart context, take no action logger.log_warning("Skip setting mux state due to ongoing warmrestart.") return @@ -178,12 +188,12 @@ def apply_mux_config(self): parser.add_argument('-s', '--active_standby', help='state: intial state for "auto" and/or "manual" config in active-standby mode, default "standby"', type=str, required=False, default='standby') - parser.add_argument('--shutdown', help='write mux state after shutdown other services, supported: mux', - type=str, required=False, choices=['mux']) + parser.add_argument('--shutdown', help='write mux state after shutdown other services, supported: mux, bgp', + type=str, required=False, choices=['mux', 'bgp'], default=None) args = parser.parse_args() active_active_state = args.active_active active_standby_state = args.active_standby - if args.shutdown == 'mux': + if args.shutdown in ['mux', 'bgp']: active_active_state = "standby" - mux_writer = MuxStateWriter(activeactive=active_active_state, activestandby=active_standby_state) + mux_writer = MuxStateWriter(activeactive=active_active_state, activestandby=active_standby_state, shutdown_module=args.shutdown) mux_writer.apply_mux_config() From 750e1b3017f8dfafcc153c5b931b13bbf2f8e20e Mon Sep 17 00:00:00 2001 From: Ye Jianquan Date: Fri, 2 Sep 2022 15:53:54 -0700 Subject: [PATCH 20/52] Define whether a test is required by code (#11921) * Define whether a test is required by code Why I did it Define whether a test job is required before merging by code. Let failures of multi-asic and t0-sonic not block PR merge. The 'required' configuration can be modified by the owner in the future. How I did it Required: t1-lag, t0 Not required: multi-asic, t0-sonic How to verify it AZP itself verifies it. 
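As a sketch, the required/optional split is carried entirely by the per-job `continueOnError` flag in azure-pipelines.yml; the fragment below mirrors the t0-sonic job from this patch (pool and display names as in the pipeline, other job fields elided):

```
- job:
  pool: sonictest-sonic-t0
  displayName: "kvmtest-t0-sonic"
  timeoutInMinutes: 360
  continueOnError: true  # optional job: its failure alone does not block merge
```

Required jobs such as kvmtest-t1-lag set `continueOnError: false` instead.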
Signed-off-by: jianquanye@microsoft.com --- .azure-pipelines/run-test-template.yml | 3 --- azure-pipelines.yml | 15 ++++++++++----- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/.azure-pipelines/run-test-template.yml b/.azure-pipelines/run-test-template.yml index 7ea0a982780f..2f1b3d8702d9 100644 --- a/.azure-pipelines/run-test-template.yml +++ b/.azure-pipelines/run-test-template.yml @@ -51,9 +51,6 @@ steps: rm -rf $(Build.ArtifactStagingDirectory)/* docker exec sonic-mgmt bash -c "/data/sonic-mgmt/tests/kvmtest.sh -en -T ${{ parameters.tbtype }} ${{ parameters.tbname }} ${{ parameters.dut }} ${{ parameters.section }}" displayName: "Run tests" - ${{ if eq(parameters.tbtype, 'multi-asic-t1-lag-pr') }}: - continueOnError: true - - script: | # save dut state if test fails virsh_version=$(virsh --version) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 5876080411a2..7856915f9b60 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -81,6 +81,10 @@ stages: - name: testbed_file value: vtestbed.csv +# For every test job: +# continueOnError: false means it's a required test job and will block merge if it fails +# continueOnError: true means it's an optional test job and will not block merge even though it fails(unless a required test job depends on its result) + jobs: - job: pool: sonictest @@ -133,7 +137,7 @@ stages: pool: sonictest displayName: "kvmtest-t0-part1" timeoutInMinutes: 360 - + continueOnError: true steps: - template: .azure-pipelines/run-test-template.yml parameters: @@ -147,7 +151,7 @@ stages: pool: sonictest displayName: "kvmtest-t0-part2" timeoutInMinutes: 360 - + continueOnError: true steps: - template: .azure-pipelines/run-test-template.yml parameters: @@ -165,6 +169,7 @@ stages: - t0_part1 - t0_part2 condition: always() + continueOnError: false variables: resultOfPart1: $[ dependencies.t0_part1.result ] resultOfPart2: $[ dependencies.t0_part2.result ] @@ -183,7 +188,7 @@ stages: pool: sonictest-t1-lag displayName: "kvmtest-t1-lag" timeoutInMinutes: 360 - + continueOnError: false steps: - template: .azure-pipelines/run-test-template.yml parameters: @@ -196,7 +201,7 @@ stages: pool: sonictest-sonic-t0 displayName: "kvmtest-t0-sonic" timeoutInMinutes: 360 - + continueOnError: true steps: - template: .azure-pipelines/run-test-template.yml parameters: @@ -210,7 +215,7 @@ stages: pool: sonictest-ma displayName: "kvmtest-multi-asic-t1-lag" timeoutInMinutes: 240 - + continueOnError: true steps: - template: .azure-pipelines/run-test-template.yml parameters: From e96ec5a74cf67011b17b8a65e9167d7be41b2ebb Mon Sep 17 00:00:00 2001 From: Lawrence Lee Date: Fri, 2 Sep 2022 16:06:10 -0700 Subject: [PATCH 21/52] [kernel]: Submodule update (#11947) Include following commit: - 443253f [patch]: Add accpt_untracked_na kernel param (#292) Signed-off-by: Lawrence Lee --- src/sonic-linux-kernel | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-linux-kernel b/src/sonic-linux-kernel index fdd9bac78cfc..443253f637ec 160000 --- a/src/sonic-linux-kernel +++ b/src/sonic-linux-kernel @@ -1 +1 @@ -Subproject commit fdd9bac78cfc2bbe932833b38c2191b1d382ed07 +Subproject commit 443253f637ec3dccac246199977a6d65346d7878 From 71d63a7be75111539a04b4c54fdb617fbe7e0081 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Fri, 2 Sep 2022 21:52:24 -0700 Subject: [PATCH 22/52] New commits in swss-common: (#11954) * 651f52b (HEAD, origin/master, origin/HEAD, master) Change syslog level of Event Publish 
(#677) * aca253a Add routing rule table for DASH (#668) --- src/sonic-swss-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-swss-common b/src/sonic-swss-common index 56b0f1877a02..651f52b8e511 160000 --- a/src/sonic-swss-common +++ b/src/sonic-swss-common @@ -1 +1 @@ -Subproject commit 56b0f1877a02c43b51595a2d7e6f09e1fabd3d32 +Subproject commit 651f52b8e51107112c8205d12608723357ecbe5e From 6a54bc439a980c7fc10e7a74de6472ee7731f8a5 Mon Sep 17 00:00:00 2001 From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com> Date: Sat, 3 Sep 2022 07:33:25 -0700 Subject: [PATCH 23/52] Streaming structured events implementation (#11848) With this PR in, you flap BGP and use events_tool to see the published events. With telemetry PR #111 in and corresponding submodule update done in buildimage, one could run gnmi_cli to capture BGP flap events. --- dockers/docker-eventd/Dockerfile.j2 | 36 + dockers/docker-eventd/critical_processes | 1 + dockers/docker-eventd/start.sh | 6 + dockers/docker-eventd/supervisord.conf | 52 + dockers/docker-fpm-frr/Dockerfile.j2 | 6 + dockers/docker-fpm-frr/bgp_regex.json | 8 + dockers/docker-fpm-frr/events_info.json | 10 + files/build_templates/docker_image_ctl.j2 | 1 + files/build_templates/eventd.service.j2 | 17 + files/build_templates/init_cfg.json.j2 | 3 +- files/build_templates/rsyslog_plugin.conf.j2 | 19 + .../build_templates/sonic_debian_extension.j2 | 4 + rules/docker-config-engine-bullseye.mk | 4 +- rules/docker-config-engine-buster.mk | 1 + rules/docker-eventd.dep | 11 + rules/docker-eventd.mk | 47 + rules/eventd.dep | 10 + rules/eventd.mk | 19 + rules/scripts.mk | 4 + rules/telemetry.mk | 5 +- slave.mk | 2 + src/sonic-eventd/Makefile | 84 ++ src/sonic-eventd/debian/changelog | 5 + src/sonic-eventd/debian/compat | 1 + src/sonic-eventd/debian/control | 14 + src/sonic-eventd/debian/rules | 6 + src/sonic-eventd/rsyslog_plugin/main.cpp | 57 ++ .../rsyslog_plugin/rsyslog_plugin.cpp | 135 +++ .../rsyslog_plugin/rsyslog_plugin.h | 40 + src/sonic-eventd/rsyslog_plugin/subdir.mk | 13 + .../rsyslog_plugin/syslog_parser.cpp | 65 ++ .../rsyslog_plugin/syslog_parser.h | 46 + .../rsyslog_plugin/timestamp_formatter.cpp | 74 ++ .../rsyslog_plugin/timestamp_formatter.h | 27 + .../rsyslog_plugin_ut.cpp | 274 ++++++ .../rsyslog_plugin_tests/subdir.mk | 12 + .../rsyslog_plugin_tests/test_regex_1.rc.json | 0 .../rsyslog_plugin_tests/test_regex_2.rc.json | 7 + .../rsyslog_plugin_tests/test_regex_3.rc.json | 6 + .../rsyslog_plugin_tests/test_regex_4.rc.json | 7 + .../rsyslog_plugin_tests/test_regex_5.rc.json | 7 + .../rsyslog_plugin_tests/test_syslogs.txt | 4 + .../rsyslog_plugin_tests/test_syslogs_2.txt | 3 + src/sonic-eventd/src/eventd.cpp | 798 +++++++++++++++ src/sonic-eventd/src/eventd.h | 268 +++++ src/sonic-eventd/src/main.cpp | 18 + src/sonic-eventd/src/subdir.mk | 13 + src/sonic-eventd/tests/eventd_ut.cpp | 915 ++++++++++++++++++ src/sonic-eventd/tests/main.cpp | 97 ++ .../database_config.json | 112 +++ .../database_config0.json | 92 ++ .../database_config1.json | 92 ++ .../database_global.json | 16 + src/sonic-eventd/tests/subdir.mk | 12 + src/sonic-eventd/tools/events_publish_tool.py | 97 ++ src/sonic-eventd/tools/events_tool.cpp | 328 +++++++ src/sonic-eventd/tools/events_volume_test.py | 68 ++ src/sonic-eventd/tools/sample_ip.json | 1 + src/sonic-eventd/tools/subdir.mk | 12 + 59 files changed, 4088 insertions(+), 4 deletions(-) create mode 100644 dockers/docker-eventd/Dockerfile.j2 create mode 100644 
dockers/docker-eventd/critical_processes create mode 100755 dockers/docker-eventd/start.sh create mode 100644 dockers/docker-eventd/supervisord.conf create mode 100644 dockers/docker-fpm-frr/bgp_regex.json create mode 100644 dockers/docker-fpm-frr/events_info.json create mode 100644 files/build_templates/eventd.service.j2 create mode 100644 files/build_templates/rsyslog_plugin.conf.j2 create mode 100644 rules/docker-eventd.dep create mode 100644 rules/docker-eventd.mk create mode 100644 rules/eventd.dep create mode 100644 rules/eventd.mk create mode 100644 src/sonic-eventd/Makefile create mode 100644 src/sonic-eventd/debian/changelog create mode 100644 src/sonic-eventd/debian/compat create mode 100644 src/sonic-eventd/debian/control create mode 100755 src/sonic-eventd/debian/rules create mode 100644 src/sonic-eventd/rsyslog_plugin/main.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h create mode 100644 src/sonic-eventd/rsyslog_plugin/subdir.mk create mode 100644 src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/syslog_parser.h create mode 100644 src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/subdir.mk create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt create mode 100644 src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt create mode 100644 src/sonic-eventd/src/eventd.cpp create mode 100644 src/sonic-eventd/src/eventd.h create mode 100644 src/sonic-eventd/src/main.cpp create mode 100644 src/sonic-eventd/src/subdir.mk create mode 100644 src/sonic-eventd/tests/eventd_ut.cpp create mode 100644 src/sonic-eventd/tests/main.cpp create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_config.json create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_config0.json create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_config1.json create mode 100644 src/sonic-eventd/tests/redis_multi_db_ut_config/database_global.json create mode 100644 src/sonic-eventd/tests/subdir.mk create mode 100644 src/sonic-eventd/tools/events_publish_tool.py create mode 100644 src/sonic-eventd/tools/events_tool.cpp create mode 100644 src/sonic-eventd/tools/events_volume_test.py create mode 100644 src/sonic-eventd/tools/sample_ip.json create mode 100644 src/sonic-eventd/tools/subdir.mk diff --git a/dockers/docker-eventd/Dockerfile.j2 b/dockers/docker-eventd/Dockerfile.j2 new file mode 100644 index 000000000000..8d935dc9f365 --- /dev/null +++ b/dockers/docker-eventd/Dockerfile.j2 @@ -0,0 +1,36 @@ +{% from "dockers/dockerfile-macros.j2" import install_debian_packages, install_python_wheels, copy_files %} +FROM docker-config-engine-bullseye-{{DOCKER_USERNAME}}:{{DOCKER_USERTAG}} + +ARG docker_container_name +ARG image_version +RUN [ -f /etc/rsyslog.conf ] && sed -ri 
"s/%syslogtag%/$docker_container_name#%syslogtag%/;" /etc/rsyslog.conf + +# Make apt-get non-interactive +ENV DEBIAN_FRONTEND=noninteractive + +# Pass the image_version to container +ENV IMAGE_VERSION=$image_version + +# Update apt's cache of available packages +RUN apt-get update + +{% if docker_eventd_debs.strip() -%} +# Copy built Debian packages +{{ copy_files("debs/", docker_eventd_debs.split(' '), "/debs/") }} + +# Install built Debian packages and implicitly install their dependencies +{{ install_debian_packages(docker_eventd_debs.split(' ')) }} +{%- endif %} + +# Clean up +RUN apt-get clean -y && \ + apt-get autoclean -y && \ + apt-get autoremove -y && \ + rm -rf /debs + +COPY ["start.sh", "/usr/bin/"] +COPY ["supervisord.conf", "/etc/supervisor/conf.d/"] +COPY ["files/supervisor-proc-exit-listener", "/usr/bin"] +COPY ["critical_processes", "/etc/supervisor"] + +ENTRYPOINT ["/usr/local/bin/supervisord"] diff --git a/dockers/docker-eventd/critical_processes b/dockers/docker-eventd/critical_processes new file mode 100644 index 000000000000..8ff28edbc148 --- /dev/null +++ b/dockers/docker-eventd/critical_processes @@ -0,0 +1 @@ +program:eventd diff --git a/dockers/docker-eventd/start.sh b/dockers/docker-eventd/start.sh new file mode 100755 index 000000000000..60cd6a00aecb --- /dev/null +++ b/dockers/docker-eventd/start.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +if [ "${RUNTIME_OWNER}" == "" ]; then + RUNTIME_OWNER="kube" +fi + diff --git a/dockers/docker-eventd/supervisord.conf b/dockers/docker-eventd/supervisord.conf new file mode 100644 index 000000000000..5d9a50bca2ae --- /dev/null +++ b/dockers/docker-eventd/supervisord.conf @@ -0,0 +1,52 @@ +[supervisord] +logfile_maxbytes=1MB +logfile_backups=2 +nodaemon=true + +[eventlistener:dependent-startup] +command=python3 -m supervisord_dependent_startup +autostart=true +autorestart=unexpected +startretries=0 +exitcodes=0,3 +events=PROCESS_STATE +buffer_size=1024 + +[eventlistener:supervisor-proc-exit-listener] +command=/usr/bin/supervisor-proc-exit-listener --container-name eventd +events=PROCESS_STATE_EXITED,PROCESS_STATE_RUNNING +autostart=true +autorestart=unexpected +buffer_size=1024 + +[program:rsyslogd] +command=/usr/sbin/rsyslogd -n -iNONE +priority=1 +autostart=false +autorestart=unexpected +stdout_logfile=syslog +stderr_logfile=syslog +dependent_startup=true + +[program:start] +command=/usr/bin/start.sh +priority=2 +autostart=false +autorestart=false +startsecs=0 +stdout_logfile=syslog +stderr_logfile=syslog +dependent_startup=true +dependent_startup_wait_for=rsyslogd:running + + +[program:eventd] +command=/usr/sbin/eventd +priority=3 +autostart=false +autorestart=false +stdout_logfile=syslog +stderr_logfile=syslog +dependent_startup=true +dependent_startup_wait_for=start:exited + diff --git a/dockers/docker-fpm-frr/Dockerfile.j2 b/dockers/docker-fpm-frr/Dockerfile.j2 index ad665e71ceae..fd7ad0f08ed4 100644 --- a/dockers/docker-fpm-frr/Dockerfile.j2 +++ b/dockers/docker-fpm-frr/Dockerfile.j2 @@ -55,9 +55,15 @@ COPY ["TSC", "/usr/bin/TSC"] COPY ["TS", "/usr/bin/TS"] COPY ["files/supervisor-proc-exit-listener", "/usr/bin"] COPY ["zsocket.sh", "/usr/bin/"] +COPY ["*.json", "/etc/rsyslog.d/"] +COPY ["files/rsyslog_plugin.conf.j2", "/etc/rsyslog.d/"] RUN chmod a+x /usr/bin/TSA && \ chmod a+x /usr/bin/TSB && \ chmod a+x /usr/bin/TSC && \ chmod a+x /usr/bin/zsocket.sh +RUN j2 -f json /etc/rsyslog.d/rsyslog_plugin.conf.j2 /etc/rsyslog.d/events_info.json > /etc/rsyslog.d/bgp_events.conf +RUN rm -f /etc/rsyslog.d/rsyslog_plugin.conf.j2* 
+RUN rm -f /etc/rsyslog.d/events_info.json* + ENTRYPOINT ["/usr/bin/docker_init.sh"] diff --git a/dockers/docker-fpm-frr/bgp_regex.json b/dockers/docker-fpm-frr/bgp_regex.json new file mode 100644 index 000000000000..898b5b060ebe --- /dev/null +++ b/dockers/docker-fpm-frr/bgp_regex.json @@ -0,0 +1,8 @@ +[ + { + "tag": "bgp-state", + "regex": "Peer .default\\|([0-9a-f:.]*[0-9a-f]*). admin state is set to .(up|down).", + "params": [ "ip", "status" ] + } +] + diff --git a/dockers/docker-fpm-frr/events_info.json b/dockers/docker-fpm-frr/events_info.json new file mode 100644 index 000000000000..66fa9a727ae2 --- /dev/null +++ b/dockers/docker-fpm-frr/events_info.json @@ -0,0 +1,10 @@ +{ + "yang_module": "sonic-events-bgp", + "proclist": [ + { + "name": "bgp", + "parse_json": "bgp_regex.json" + } + ] +} + diff --git a/files/build_templates/docker_image_ctl.j2 b/files/build_templates/docker_image_ctl.j2 index 99051ee62d8c..a77706cad497 100644 --- a/files/build_templates/docker_image_ctl.j2 +++ b/files/build_templates/docker_image_ctl.j2 @@ -515,6 +515,7 @@ start() { {%- endif -%} {%- if docker_container_name == "bgp" %} -v /etc/sonic/frr/$DEV:/etc/frr:rw \ + -v /usr/share/sonic/scripts:/usr/share/sonic/scripts:ro \ {%- endif %} {%- if docker_container_name == "database" %} $DB_OPT \ diff --git a/files/build_templates/eventd.service.j2 b/files/build_templates/eventd.service.j2 new file mode 100644 index 000000000000..0ad7f52ee83d --- /dev/null +++ b/files/build_templates/eventd.service.j2 @@ -0,0 +1,17 @@ +[Unit] +Description=EVENTD container +Requires=updategraph.service +After=updategraph.service +BindsTo=sonic.target +After=sonic.target +StartLimitIntervalSec=1200 +StartLimitBurst=3 + +[Service] +ExecStartPre=/usr/bin/{{docker_container_name}}.sh start +ExecStart=/usr/bin/{{docker_container_name}}.sh wait +ExecStop=/usr/bin/{{docker_container_name}}.sh stop +RestartSec=30 + +[Install] +WantedBy=sonic.target diff --git a/files/build_templates/init_cfg.json.j2 b/files/build_templates/init_cfg.json.j2 index 7de0ad977807..8e92807f4e2c 100644 --- a/files/build_templates/init_cfg.json.j2 +++ b/files/build_templates/init_cfg.json.j2 @@ -39,6 +39,7 @@ ("pmon", "enabled", false, "enabled"), ("radv", "enabled", false, "enabled"), ("snmp", "enabled", true, "enabled"), + ("eventd", "enabled", true, "enabled"), ("swss", "enabled", false, "enabled"), ("syncd", "enabled", false, "enabled"), ("teamd", "enabled", false, "enabled")] %} @@ -69,7 +70,7 @@ "check_up_status" : "false", {%- endif %} {%- if include_kubernetes == "y" %} -{%- if feature in ["lldp", "pmon", "radv", "snmp", "telemetry"] %} +{%- if feature in ["lldp", "pmon", "radv", "eventd", "snmp", "telemetry"] %} "set_owner": "kube", {% else %} "set_owner": "local", {% endif %} {% endif %} "high_mem_alert": "disabled" diff --git a/files/build_templates/rsyslog_plugin.conf.j2 b/files/build_templates/rsyslog_plugin.conf.j2 new file mode 100644 index 000000000000..ec19c62a78f6 --- /dev/null +++ b/files/build_templates/rsyslog_plugin.conf.j2 @@ -0,0 +1,19 @@ +## rsyslog-plugin for streaming telemetry via gnmi + + + +template(name="prog_msg" type="list") { + property(name="msg") + constant(value="\n") +} + +$ModLoad omprog + +{% for proc in proclist %} +if re_match($programname, "{{ proc.name }}") then { + action(type="omprog" + binary="/usr/share/sonic/scripts/rsyslog_plugin -r /etc/rsyslog.d/{{ proc.parse_json }} -m {{ yang_module }}" + output="/var/log/rsyslog_plugin.log" + template="prog_msg") +} +{% endfor %} diff --git 
a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index 4b7a77b3151c..56b8290cc12e 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -799,6 +799,10 @@ sudo bash -c "echo { > $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/ctr_image_name {% endfor %} sudo bash -c "echo } >> $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/ctr_image_names.json" +# copy rsyslog plugin binary for use by all dockers that use plugin to publish events. +sudo mkdir -p ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS} +sudo cp ${files_path}/rsyslog_plugin ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS}/ + {% for script in installer_start_scripts.split(' ') -%} if [ -f $TARGET_MACHINE"_{{script}}" ]; then sudo cp $TARGET_MACHINE"_{{script}}" $FILESYSTEM_ROOT/usr/bin/{{script}} diff --git a/rules/docker-config-engine-bullseye.mk b/rules/docker-config-engine-bullseye.mk index c125aa65b209..ea0ae43b54b9 100644 --- a/rules/docker-config-engine-bullseye.mk +++ b/rules/docker-config-engine-bullseye.mk @@ -8,13 +8,15 @@ $(DOCKER_CONFIG_ENGINE_BULLSEYE)_DEPENDS += $(LIBSWSSCOMMON) \ $(LIBYANG_CPP) \ $(LIBYANG_PY3) \ $(PYTHON3_SWSSCOMMON) \ - $(SONIC_DB_CLI) + $(SONIC_DB_CLI) \ + $(SONIC_EVENTD) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \ $(SONIC_YANG_MGMT_PY3) \ $(SONIC_YANG_MODELS_PY3) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_PYTHON_WHEELS += $(SONIC_CONFIG_ENGINE_PY3) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_LOAD_DOCKERS += $(DOCKER_BASE_BULLSEYE) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $(SWSS_VARS_TEMPLATE) +$(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $(RSYSLOG_PLUGIN_CONF_J2) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_FILES += $($(SONIC_CTRMGRD)_CONTAINER_SCRIPT) $(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_DEPENDS = $($(DOCKER_BASE_BULLSEYE)_DBG_DEPENDS) \ diff --git a/rules/docker-config-engine-buster.mk b/rules/docker-config-engine-buster.mk index ae5589a59595..38a94bae4c1d 100644 --- a/rules/docker-config-engine-buster.mk +++ b/rules/docker-config-engine-buster.mk @@ -15,6 +15,7 @@ $(DOCKER_CONFIG_ENGINE_BUSTER)_PYTHON_WHEELS += $(SONIC_PY_COMMON_PY3) \ $(DOCKER_CONFIG_ENGINE_BUSTER)_PYTHON_WHEELS += $(SONIC_CONFIG_ENGINE_PY3) $(DOCKER_CONFIG_ENGINE_BUSTER)_LOAD_DOCKERS += $(DOCKER_BASE_BUSTER) $(DOCKER_CONFIG_ENGINE_BUSTER)_FILES += $(SWSS_VARS_TEMPLATE) +$(DOCKER_CONFIG_ENGINE_BUSTER)_FILES += $(RSYSLOG_PLUGIN_CONF_J2) $(DOCKER_CONFIG_ENGINE_BUSTER)_FILES += $($(SONIC_CTRMGRD)_CONTAINER_SCRIPT) $(DOCKER_CONFIG_ENGINE_BUSTER)_DBG_DEPENDS = $($(DOCKER_BASE_BUSTER)_DBG_DEPENDS) \ diff --git a/rules/docker-eventd.dep b/rules/docker-eventd.dep new file mode 100644 index 000000000000..382513e5eb82 --- /dev/null +++ b/rules/docker-eventd.dep @@ -0,0 +1,11 @@ + +DPATH := $($(DOCKER_EVENTD)_PATH) +DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/docker-eventd.mk rules/docker-eventd.dep +DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST) +DEP_FILES += $(shell git ls-files $(DPATH)) + +$(DOCKER_EVENTD)_CACHE_MODE := GIT_CONTENT_SHA +$(DOCKER_EVENTD)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST) +$(DOCKER_EVENTD)_DEP_FILES := $(DEP_FILES) + +$(eval $(call add_dbg_docker,$(DOCKER_EVENTD),$(DOCKER_EVENTD_DBG))) diff --git a/rules/docker-eventd.mk b/rules/docker-eventd.mk new file mode 100644 index 000000000000..c69fee09e569 --- /dev/null +++ b/rules/docker-eventd.mk @@ -0,0 +1,47 @@ +# docker image for eventd + +DOCKER_EVENTD_STEM = docker-eventd +DOCKER_EVENTD = $(DOCKER_EVENTD_STEM).gz +DOCKER_EVENTD_DBG = $(DOCKER_EVENTD_STEM)-$(DBG_IMAGE_MARK).gz + 
+$(DOCKER_EVENTD)_DEPENDS += $(SONIC_EVENTD) + +$(DOCKER_EVENTD)_DBG_DEPENDS = $($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_DEPENDS) +$(DOCKER_EVENTD)_DBG_DEPENDS += $(SONIC_EVENTD_DBG) $(LIBSWSSCOMMON_DBG) + +$(DOCKER_EVENTD)_DBG_IMAGE_PACKAGES = $($(DOCKER_CONFIG_ENGINE_BULLSEYE)_DBG_IMAGE_PACKAGES) + +$(DOCKER_EVENTD)_LOAD_DOCKERS = $(DOCKER_CONFIG_ENGINE_BULLSEYE) + +$(DOCKER_EVENTD)_PATH = $(DOCKERS_PATH)/$(DOCKER_EVENTD_STEM) + +$(DOCKER_EVENTD)_INSTALL_PYTHON_WHEELS = $(SONIC_UTILITIES_PY3) +$(DOCKER_EVENTD)_INSTALL_DEBS = $(PYTHON3_SWSSCOMMON) + +$(DOCKER_EVENTD)_VERSION = 1.0.0 +$(DOCKER_EVENTD)_PACKAGE_NAME = eventd + +$(DOCKER_DHCP)_SERVICE_REQUIRES = updategraph +$(DOCKER_DHCP)_SERVICE_AFTER = database + +SONIC_DOCKER_IMAGES += $(DOCKER_EVENTD) +SONIC_INSTALL_DOCKER_IMAGES += $(DOCKER_EVENTD) + +SONIC_DOCKER_DBG_IMAGES += $(DOCKER_EVENTD_DBG) +SONIC_INSTALL_DOCKER_DBG_IMAGES += $(DOCKER_EVENTD_DBG) + +$(DOCKER_EVENTD)_CONTAINER_NAME = eventd +$(DOCKER_EVENTD)_RUN_OPT += --privileged -t +$(DOCKER_EVENTD)_RUN_OPT += -v /etc/sonic:/etc/sonic:ro + +SONIC_BULLSEYE_DOCKERS += $(DOCKER_EVENTD) +SONIC_BULLSEYE_DBG_DOCKERS += $(DOCKER_EVENTD_DBG) + +$(DOCKER_EVENTD)_FILESPATH = $($(SONIC_EVENTD)_SRC_PATH)/rsyslog_plugin + +$(DOCKER_EVENTD)_PLUGIN = rsyslog_plugin +$($(DOCKER_EVENTD)_PLUGIN)_PATH = $($(DOCKER_EVENTD)_FILESPATH) + +SONIC_COPY_FILES += $($(DOCKER_EVENTD)_PLUGIN) +$(DOCKER_EVENTD)_SHARED_FILES = $($(DOCKER_EVENTD)_PLUGIN) + diff --git a/rules/eventd.dep b/rules/eventd.dep new file mode 100644 index 000000000000..12f32a30f2c7 --- /dev/null +++ b/rules/eventd.dep @@ -0,0 +1,10 @@ + +SPATH := $($(SONIC_EVENTD)_SRC_PATH) +DEP_FILES := $(SONIC_COMMON_FILES_LIST) rules/eventd.mk rules/eventd.dep +DEP_FILES += $(SONIC_COMMON_BASE_FILES_LIST) +DEP_FILES := $(addprefix $(SPATH)/,$(shell cd $(SPATH) && git ls-files)) + +$(SONIC_EVENTD)_CACHE_MODE := GIT_CONTENT_SHA +$(SONIC_EVENTD)_DEP_FLAGS := $(SONIC_COMMON_FLAGS_LIST) +$(SONIC_EVENTD)_DEP_FILES := $(DEP_FILES) + diff --git a/rules/eventd.mk b/rules/eventd.mk new file mode 100644 index 000000000000..9eea21a4cfb5 --- /dev/null +++ b/rules/eventd.mk @@ -0,0 +1,19 @@ +# eventd package + +SONIC_EVENTD_VERSION = 1.0.0-0 +SONIC_EVENTD_PKG_NAME = eventd + +SONIC_EVENTD = sonic-$(SONIC_EVENTD_PKG_NAME)_$(SONIC_EVENTD_VERSION)_$(CONFIGURED_ARCH).deb +$(SONIC_EVENTD)_SRC_PATH = $(SRC_PATH)/sonic-eventd +$(SONIC_EVENTD)_DEPENDS += $(LIBSWSSCOMMON) $(LIBSWSSCOMMON_DEV) + +SONIC_DPKG_DEBS += $(SONIC_EVENTD) + +SONIC_EVENTD_DBG = sonic-$(SONIC_EVENTD_PKG_NAME)-dbgsym_$(SONIC_EVENTD_VERSION)_$(CONFIGURED_ARCH).deb +$(eval $(call add_derived_package,$(SONIC_EVENTD),$(SONIC_EVENTD_DBG))) + +# The .c, .cpp, .h & .hpp files under src/{$DBG_SRC_ARCHIVE list} +# are archived into debug one image to facilitate debugging. 
+# +DBG_SRC_ARCHIVE += sonic-eventd + diff --git a/rules/scripts.mk b/rules/scripts.mk index ce6a8eb90025..12919d520b09 100644 --- a/rules/scripts.mk +++ b/rules/scripts.mk @@ -32,6 +32,9 @@ $(SWSS_VARS_TEMPLATE)_PATH = files/build_templates COPP_CONFIG_TEMPLATE = copp_cfg.j2 $(COPP_CONFIG_TEMPLATE)_PATH = files/image_config/copp +RSYSLOG_PLUGIN_CONF_J2 = rsyslog_plugin.conf.j2 +$(RSYSLOG_PLUGIN_CONF_J2)_PATH = files/build_templates + SONIC_COPY_FILES += $(CONFIGDB_LOAD_SCRIPT) \ $(ARP_UPDATE_SCRIPT) \ $(ARP_UPDATE_VARS_TEMPLATE) \ @@ -42,4 +45,5 @@ SONIC_COPY_FILES += $(CONFIGDB_LOAD_SCRIPT) \ $(SYSCTL_NET_CONFIG) \ $(UPDATE_CHASSISDB_CONFIG_SCRIPT) \ $(SWSS_VARS_TEMPLATE) \ + $(RSYSLOG_PLUGIN_CONF_J2) \ $(COPP_CONFIG_TEMPLATE) diff --git a/rules/telemetry.mk b/rules/telemetry.mk index 24fe4ae2fe52..942e9797726a 100644 --- a/rules/telemetry.mk +++ b/rules/telemetry.mk @@ -2,6 +2,7 @@ SONIC_TELEMETRY = sonic-gnmi_0.1_$(CONFIGURED_ARCH).deb $(SONIC_TELEMETRY)_SRC_PATH = $(SRC_PATH)/sonic-gnmi -$(SONIC_TELEMETRY)_DEPENDS = $(SONIC_MGMT_COMMON) $(SONIC_MGMT_COMMON_CODEGEN) -$(SONIC_TELEMETRY)_RDEPENDS = +$(SONIC_TELEMETRY)_DEPENDS = $(SONIC_MGMT_COMMON) $(SONIC_MGMT_COMMON_CODEGEN) \ + $(LIBSWSSCOMMON_DEV) $(LIBSWSSCOMMON) +$(SONIC_TELEMETRY)_RDEPENDS = $(LIBSWSSCOMMON) $(LIBSWSSCOMMON_DEV) SONIC_DPKG_DEBS += $(SONIC_TELEMETRY) diff --git a/slave.mk b/slave.mk index 7cdee954ad73..f720061b2e52 100644 --- a/slave.mk +++ b/slave.mk @@ -1292,6 +1292,8 @@ $(addprefix $(TARGET_PATH)/, $(SONIC_INSTALLERS)) : $(TARGET_PATH)/% : \ $(if $($(docker:-dbg.gz=.gz)_MACHINE),\ mv $($(docker:-dbg.gz=.gz)_CONTAINER_NAME).sh $($(docker:-dbg.gz=.gz)_MACHINE)_$($(docker:-dbg.gz=.gz)_CONTAINER_NAME).sh ) + $(foreach file, $($(docker)_SHARED_FILES), \ + { cp $($(file)_PATH)/$(file) $(FILES_PATH)/ $(LOG) || exit 1 ; } ; ) ) # Exported variables are used by sonic_debian_extension.sh diff --git a/src/sonic-eventd/Makefile b/src/sonic-eventd/Makefile new file mode 100644 index 000000000000..00d3199a65bc --- /dev/null +++ b/src/sonic-eventd/Makefile @@ -0,0 +1,84 @@ +RM := rm -rf +EVENTD_TARGET := eventd +EVENTD_TEST := tests/tests +EVENTD_TOOL := tools/events_tool +EVENTD_PUBLISH_TOOL := tools/events_publish_tool.py +RSYSLOG-PLUGIN_TARGET := rsyslog_plugin/rsyslog_plugin +RSYSLOG-PLUGIN_TEST := rsyslog_plugin_tests/tests +CP := cp +MKDIR := mkdir +CC := g++ +LIBS := -levent -lhiredis -lswsscommon -lpthread -lboost_thread -lboost_system -lzmq -lboost_serialization -luuid -llua5.1 +TEST_LIBS := -L/usr/src/gtest -lgtest -lgtest_main -lgmock -lgmock_main + +CFLAGS += -Wall -std=c++17 -fPIE -I$(PWD)/../sonic-swss-common/common +PWD := $(shell pwd) + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(strip $(C_DEPS)),) +-include $(C_DEPS) $(OBJS) +endif +endif + +-include src/subdir.mk +-include tests/subdir.mk +-include tools/subdir.mk +-include rsyslog_plugin/subdir.mk +-include rsyslog_plugin_tests/subdir.mk + +all: sonic-eventd eventd-tests eventd-tool rsyslog-plugin rsyslog-plugin-tests + +sonic-eventd: $(OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TARGET) $(OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +eventd-tool: $(TOOL_OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TOOL) $(TOOL_OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +rsyslog-plugin: $(RSYSLOG-PLUGIN_OBJS) + @echo 'Buidling Target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(RSYSLOG-PLUGIN_TARGET) 
$(RSYSLOG-PLUGIN_OBJS) $(LIBS) + @echo 'Finished building target: $@' + @echo ' ' + +eventd-tests: $(TEST_OBJS) + @echo 'Building target: $@' + @echo 'Invoking: G++ Linker' + $(CC) $(LDFLAGS) -o $(EVENTD_TEST) $(TEST_OBJS) $(LIBS) $(TEST_LIBS) + @echo 'Finished building target: $@' + $(EVENTD_TEST) + @echo 'Finished running tests' + @echo ' ' + +rsyslog-plugin-tests: $(RSYSLOG-PLUGIN-TEST_OBJS) + @echo 'BUILDING target: $@' + @echo 'Invoking G++ Linker' + $(CC) $(LDFLAGS) -o $(RSYSLOG-PLUGIN_TEST) $(RSYSLOG-PLUGIN-TEST_OBJS) $(LIBS) $(TEST_LIBS) + @echo 'Finished building target: $@' + $(RSYSLOG-PLUGIN_TEST) + @echo 'Finished running tests' + @echo ' ' + +install: + $(MKDIR) -p $(DESTDIR)/usr/sbin + $(CP) $(EVENTD_TARGET) $(DESTDIR)/usr/sbin + $(CP) $(EVENTD_TOOL) $(DESTDIR)/usr/sbin + $(CP) $(EVENTD_PUBLISH_TOOL) $(DESTDIR)/usr/sbin + +deinstall: + $(RM) $(DESTDIR)/usr/sbin/$(EVENTD_TARGET) + $(RM) $(DESTDIR)/usr/sbin/$(RSYSLOG-PLUGIN_TARGET) + $(RM) -rf $(DESTDIR)/usr/sbin + +clean: + -@echo ' ' + +.PHONY: all clean dependents diff --git a/src/sonic-eventd/debian/changelog b/src/sonic-eventd/debian/changelog new file mode 100644 index 000000000000..eba3bf10ea53 --- /dev/null +++ b/src/sonic-eventd/debian/changelog @@ -0,0 +1,5 @@ +sonic-eventd (1.0.0-0) UNRELEASED; urgency=medium + + * Initial release. + +-- Renuka Manavalan diff --git a/src/sonic-eventd/debian/compat b/src/sonic-eventd/debian/compat new file mode 100644 index 000000000000..48082f72f087 --- /dev/null +++ b/src/sonic-eventd/debian/compat @@ -0,0 +1 @@ +12 diff --git a/src/sonic-eventd/debian/control b/src/sonic-eventd/debian/control new file mode 100644 index 000000000000..95ae6fd76452 --- /dev/null +++ b/src/sonic-eventd/debian/control @@ -0,0 +1,14 @@ +Source: sonic-eventd +Section: devel +Priority: optional +Maintainer: Renuka Manavalan +Build-Depends: debhelper (>= 12.0.0), libevent-dev, libboost-thread-dev, libboost-system-dev, libswsscommon-dev +Standards-Version: 3.9.3 +Homepage: https://github.com/Azure/sonic-buildimage +XS-Go-Import-Path: github.com/Azure/sonic-buildimage + +Package: sonic-eventd +Architecture: any +Built-Using: ${misc:Built-Using} +Depends: ${shlibs:Depends} +Description: SONiC event service diff --git a/src/sonic-eventd/debian/rules b/src/sonic-eventd/debian/rules new file mode 100755 index 000000000000..ac2cd63889ef --- /dev/null +++ b/src/sonic-eventd/debian/rules @@ -0,0 +1,6 @@ +#!/usr/bin/make -f + +export DEB_BUILD_MAINT_OPTIONS=hardening=+all + +%: + dh $@ --parallel diff --git a/src/sonic-eventd/rsyslog_plugin/main.cpp b/src/sonic-eventd/rsyslog_plugin/main.cpp new file mode 100644 index 000000000000..53162608c5a9 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/main.cpp @@ -0,0 +1,57 @@ +#include +#include +#include +#include "rsyslog_plugin.h" + +#define SUCCESS_CODE 0 +#define INVALID_REGEX_ERROR_CODE 1 +#define EVENT_INIT_PUBLISH_ERROR_CODE 2 +#define MISSING_ARGS_ERROR_CODE 3 + +void showUsage() { + cout << "Usage for rsyslog_plugin: \n" << "options\n" + << "\t-r,required,type=string\t\tPath to regex file\n" + << "\t-m,required,type=string\t\tYANG module name of source generating syslog message\n" + << "\t-h \t\tHelp" + << endl; +} + +int main(int argc, char** argv) { + string regexPath; + string moduleName; + int optionVal; + + while((optionVal = getopt(argc, argv, "r:m:h")) != -1) { + switch(optionVal) { + case 'r': + regexPath = optarg; + break; + case 'm': + moduleName = optarg; + break; + case 'h': + case '?': + default: + showUsage(); + return 1; + } + } + + 
if(regexPath.empty() || moduleName.empty()) { // Missing required rc path + cerr << "Error: Missing regexPath and moduleName." << endl; + return MISSING_ARGS_ERROR_CODE; + } + + unique_ptr plugin(new RsyslogPlugin(moduleName, regexPath)); + int returnCode = plugin->onInit(); + if(returnCode == INVALID_REGEX_ERROR_CODE) { + SWSS_LOG_ERROR("Rsyslog plugin was not able to be initialized due to invalid regex file provided.\n"); + return returnCode; + } else if(returnCode == EVENT_INIT_PUBLISH_ERROR_CODE) { + SWSS_LOG_ERROR("Rsyslog plugin was not able to be initialized due to event_init_publish call failing.\n"); + return returnCode; + } + + plugin->run(); + return SUCCESS_CODE; +} diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp new file mode 100644 index 000000000000..3786c5f0fea9 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.cpp @@ -0,0 +1,135 @@ +#include +#include +#include +#include +#include +#include +#include "rsyslog_plugin.h" +#include "json.hpp" + +using json = nlohmann::json; + +bool RsyslogPlugin::onMessage(string msg, lua_State* luaState) { + string tag; + event_params_t paramDict; + if(!m_parser->parseMessage(msg, tag, paramDict, luaState)) { + SWSS_LOG_DEBUG("%s was not able to be parsed into a structured event\n", msg.c_str()); + return false; + } else { + int returnCode = event_publish(m_eventHandle, tag, ¶mDict); + if(returnCode != 0) { + SWSS_LOG_ERROR("rsyslog_plugin was not able to publish event for %s.\n", tag.c_str()); + return false; + } + return true; + } +} + +void parseParams(vector params, vector& eventParams) { + for(long unsigned int i = 0; i < params.size(); i++) { + if(params[i].empty()) { + SWSS_LOG_ERROR("Empty param provided in regex file\n"); + continue; + } + EventParam ep = EventParam(); + auto delimPos = params[i].find(':'); + if(delimPos == string::npos) { // no lua code + ep.paramName = params[i]; + } else { + ep.paramName = params[i].substr(0, delimPos); + ep.luaCode = params[i].substr(delimPos + 1); + if(ep.luaCode.empty()) { + SWSS_LOG_ERROR("Lua code missing after :\n"); + } + } + eventParams.push_back(ep); + } +} + +bool RsyslogPlugin::createRegexList() { + fstream regexFile; + json jsonList = json::array(); + regexFile.open(m_regexPath, ios::in); + if (!regexFile) { + SWSS_LOG_ERROR("No such path exists: %s for source %s\n", m_regexPath.c_str(), m_moduleName.c_str()); + return false; + } + try { + regexFile >> jsonList; + } catch (invalid_argument& iaException) { + SWSS_LOG_ERROR("Invalid JSON file: %s, throws exception: %s\n", m_regexPath.c_str(), iaException.what()); + return false; + } + + string regexString; + string timestampRegex = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*"; + regex expression; + vector regexList; + + for(long unsigned int i = 0; i < jsonList.size(); i++) { + RegexStruct rs = RegexStruct(); + vector eventParams; + try { + string eventRegex = jsonList[i]["regex"]; + regexString = timestampRegex + eventRegex; + string tag = jsonList[i]["tag"]; + vector params = jsonList[i]["params"]; + vector timestampParams = { "month", "day", "time" }; + params.insert(params.begin(), timestampParams.begin(), timestampParams.end()); + regex expr(regexString); + expression = expr; + parseParams(params, eventParams); + rs.params = eventParams; + rs.tag = tag; + rs.regexExpression = expression; + regexList.push_back(rs); + } catch (domain_error& deException) { + SWSS_LOG_ERROR("Missing required key, throws exception: 
%s\n", deException.what()); + return false; + } catch (regex_error& reException) { + SWSS_LOG_ERROR("Invalid regex, throws exception: %s\n", reException.what()); + return false; + } + } + + if(regexList.empty()) { + SWSS_LOG_ERROR("Empty list of regex expressions.\n"); + return false; + } + + m_parser->m_regexList = regexList; + + regexFile.close(); + return true; +} + +void RsyslogPlugin::run() { + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + while(true) { + string line; + getline(cin, line); + if(line.empty()) { + continue; + } + onMessage(line, luaState); + } + lua_close(luaState); +} + +int RsyslogPlugin::onInit() { + m_eventHandle = events_init_publisher(m_moduleName); + bool success = createRegexList(); + if(!success) { + return 1; // invalid regex error code + } else if(m_eventHandle == NULL) { + return 2; // event init publish error code + } + return 0; +} + +RsyslogPlugin::RsyslogPlugin(string moduleName, string regexPath) { + m_parser = unique_ptr(new SyslogParser()); + m_moduleName = moduleName; + m_regexPath = regexPath; +} diff --git a/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h new file mode 100644 index 000000000000..0811b5f3032f --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/rsyslog_plugin.h @@ -0,0 +1,40 @@ +#ifndef RSYSLOG_PLUGIN_H +#define RSYSLOG_PLUGIN_H + +extern "C" +{ + #include + #include + #include +} +#include +#include +#include "syslog_parser.h" +#include "events.h" +#include "logger.h" + +using namespace std; +using namespace swss; + +/** + * Rsyslog Plugin will utilize an instance of a syslog parser to read syslog messages from rsyslog.d and will continuously read from stdin + * A plugin instance is created for each container/host. + * + */ + +class RsyslogPlugin { +public: + int onInit(); + bool onMessage(string msg, lua_State* luaState); + void run(); + RsyslogPlugin(string moduleName, string regexPath); +private: + unique_ptr m_parser; + event_handle_t m_eventHandle; + string m_regexPath; + string m_moduleName; + bool createRegexList(); +}; + +#endif + diff --git a/src/sonic-eventd/rsyslog_plugin/subdir.mk b/src/sonic-eventd/rsyslog_plugin/subdir.mk new file mode 100644 index 000000000000..17df55c718a0 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/subdir.mk @@ -0,0 +1,13 @@ +CC := g++ + +RSYSLOG-PLUGIN-TEST_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o ./rsyslog_plugin/timestamp_formatter.o +RSYSLOG-PLUGIN_OBJS += ./rsyslog_plugin/rsyslog_plugin.o ./rsyslog_plugin/syslog_parser.o ./rsyslog_plugin/timestamp_formatter.o ./rsyslog_plugin/main.o + +C_DEPS += ./rsyslog_plugin/rsyslog_plugin.d ./rsyslog_plugin/syslog_parser.d ./rsyslog_plugin/timestamp_formatter.d ./rsyslog_plugin/main.d + +rsyslog_plugin/%.o: rsyslog_plugin/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst rsyslog_plugin/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$(@)" "$<" + @echo 'Finished building: $<' + @echo ' ' diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp new file mode 100644 index 000000000000..ebf7c598d15a --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.cpp @@ -0,0 +1,65 @@ +#include +#include +#include "syslog_parser.h" +#include "logger.h" + +/** + * Parses syslog message and returns structured event + * + * @param nessage us syslog message being fed in by rsyslog.d + * @return return 
structured event json for publishing + * +*/ + +bool SyslogParser::parseMessage(string message, string& eventTag, event_params_t& paramMap, lua_State* luaState) { + for(long unsigned int i = 0; i < m_regexList.size(); i++) { + smatch matchResults; + if(!regex_search(message, matchResults, m_regexList[i].regexExpression) || m_regexList[i].params.size() != matchResults.size() - 1 || matchResults.size() < 4) { + continue; + } + string formattedTimestamp; + if(!matchResults[1].str().empty() && !matchResults[2].str().empty() && !matchResults[3].str().empty()) { // found timestamp components + formattedTimestamp = m_timestampFormatter->changeTimestampFormat({ matchResults[1].str(), matchResults[2].str(), matchResults[3].str() }); + } + if(!formattedTimestamp.empty()) { + paramMap["timestamp"] = formattedTimestamp; + } else { + SWSS_LOG_INFO("Timestamp is invalid and is not able to be formatted"); + } + + // found matching regex + eventTag = m_regexList[i].tag; + // check params for lua code + for(long unsigned int j = 3; j < m_regexList[i].params.size(); j++) { + string resultValue = matchResults[j + 1].str(); + string paramName = m_regexList[i].params[j].paramName; + const char* luaCode = m_regexList[i].params[j].luaCode.c_str(); + + if(luaCode == NULL || *luaCode == 0) { + SWSS_LOG_INFO("Invalid lua code, empty or missing"); + paramMap[paramName] = resultValue; + continue; + } + + // execute lua code + lua_pushstring(luaState, resultValue.c_str()); + lua_setglobal(luaState, "arg"); + if(luaL_dostring(luaState, luaCode) == 0) { + lua_pop(luaState, lua_gettop(luaState)); + } else { // error in lua code + SWSS_LOG_ERROR("Invalid lua code, unable to do operation.\n"); + paramMap[paramName] = resultValue; + continue; + } + lua_getglobal(luaState, "ret"); + paramMap[paramName] = lua_tostring(luaState, -1); + lua_pop(luaState, 1); + } + return true; + } + return false; +} + +SyslogParser::SyslogParser() { + m_timestampFormatter = unique_ptr(new TimestampFormatter()); +} diff --git a/src/sonic-eventd/rsyslog_plugin/syslog_parser.h b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h new file mode 100644 index 000000000000..6293eb3c4a34 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/syslog_parser.h @@ -0,0 +1,46 @@ +#ifndef SYSLOG_PARSER_H +#define SYSLOG_PARSER_H + +extern "C" +{ + #include + #include + #include +} + +#include +#include +#include +#include "json.hpp" +#include "events.h" +#include "timestamp_formatter.h" + +using namespace std; +using json = nlohmann::json; + +struct EventParam { + string paramName; + string luaCode; +}; + +struct RegexStruct { + regex regexExpression; + vector params; + string tag; +}; + +/** + * Syslog Parser is responsible for parsing log messages fed by rsyslog.d and returns + * matched result to rsyslog_plugin to use with events publish API + * + */ + +class SyslogParser { +public: + unique_ptr m_timestampFormatter; + vector m_regexList; + bool parseMessage(string message, string& tag, event_params_t& paramDict, lua_State* luaState); + SyslogParser(); +}; + +#endif diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp new file mode 100644 index 000000000000..cc179adbbc75 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.cpp @@ -0,0 +1,74 @@ +#include +#include "timestamp_formatter.h" +#include "logger.h" +#include "events.h" + +using namespace std; + +/*** + * + * Formats given string into string needed by YANG model + * + * @param timestamp parsed from syslog message 
+ * @return formatted timestamp that conforms to YANG model + * + */ + +static const unordered_map g_monthDict { + { "Jan", "01" }, + { "Feb", "02" }, + { "Mar", "03" }, + { "Apr", "04" }, + { "May", "05" }, + { "Jun", "06" }, + { "Jul", "07" }, + { "Aug", "08" }, + { "Sep", "09" }, + { "Oct", "10" }, + { "Nov", "11" }, + { "Dec", "12" } +}; + +string TimestampFormatter::getYear(string timestamp) { + if(!m_storedTimestamp.empty()) { + if(m_storedTimestamp.compare(timestamp) <= 0) { + m_storedTimestamp = timestamp; + return m_storedYear; + } + } + // no last timestamp or year change + time_t currentTime = time(nullptr); + tm* const localTime = localtime(¤tTime); + stringstream ss; + auto currentYear = 1900 + localTime->tm_year; + ss << currentYear; // get current year + string year = ss.str(); + m_storedTimestamp = timestamp; + m_storedYear = year; + return year; +} + +string TimestampFormatter::changeTimestampFormat(vector dateComponents) { + if(dateComponents.size() < 3) { + SWSS_LOG_ERROR("Timestamp formatter unable to format due to invalid input"); + return ""; + } + string formattedTimestamp; // need to change format of Mmm dd hh:mm:ss.SSSSSS to YYYY-mm-ddThh:mm:ss.SSSSSSZ + string month; + auto it = g_monthDict.find(dateComponents[0]); + if(it != g_monthDict.end()) { + month = it->second; + } else { + SWSS_LOG_ERROR("Timestamp month was given in wrong format.\n"); + return ""; + } + string day = dateComponents[1]; + if(day.size() == 1) { // convert 1 -> 01 + day.insert(day.begin(), '0'); + } + string time = dateComponents[2]; + string currentTimestamp = month + day + time; + string year = getYear(currentTimestamp); + formattedTimestamp = year + "-" + month + "-" + day + "T" + time + "Z"; + return formattedTimestamp; +} diff --git a/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h new file mode 100644 index 000000000000..ea99c4cfcb8c --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin/timestamp_formatter.h @@ -0,0 +1,27 @@ +#ifndef TIMESTAMP_FORMATTER_H +#define TIMESTAMP_FORMATTER_H + +#include +#include +#include +#include +#include + +using namespace std; + +/*** + * + * TimestampFormatter is responsible for formatting the timestamps received in syslog messages and to format them into the type needed by YANG model + * + */ + +class TimestampFormatter { +public: + string changeTimestampFormat(vector dateComponents); + string m_storedTimestamp; + string m_storedYear; +private: + string getYear(string timestamp); +}; + +#endif diff --git a/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp new file mode 100644 index 000000000000..be5a19ad5a5b --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/rsyslog_plugin_ut.cpp @@ -0,0 +1,274 @@ +extern "C" +{ + #include + #include + #include +} +#include +#include +#include +#include +#include "gtest/gtest.h" +#include "json.hpp" +#include "events.h" +#include "../rsyslog_plugin/rsyslog_plugin.h" +#include "../rsyslog_plugin/syslog_parser.h" +#include "../rsyslog_plugin/timestamp_formatter.h" + +using namespace std; +using namespace swss; +using json = nlohmann::json; + +vector createEventParams(vector params, vector luaCodes) { + vector eventParams; + for(long unsigned int i = 0; i < params.size(); i++) { + EventParam ep = EventParam(); + ep.paramName = params[i]; + ep.luaCode = luaCodes[i]; + eventParams.push_back(ep); + } + return eventParams; +} + +TEST(syslog_parser, matching_regex) { + json jList = 
json::array(); + vector regexList; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*message (.*) other_data (.*) even_more_data (.*)"; + vector params = { "month", "day", "time", "message", "other_data", "even_more_data" }; + vector luaCodes = { "", "", "", "", "", "" }; + regex expression(regexString); + + RegexStruct rs = RegexStruct(); + rs.tag = "test_tag"; + rs.regexExpression = expression; + rs.params = createEventParams(params, luaCodes); + regexList.push_back(rs); + + string tag; + event_params_t paramDict; + + event_params_t expectedDict; + expectedDict["message"] = "test_message"; + expectedDict["other_data"] = "test_data"; + expectedDict["even_more_data"] = "test_data"; + + unique_ptr parser(new SyslogParser()); + parser->m_regexList = regexList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("message test_message other_data test_data even_more_data test_data", tag, paramDict, luaState); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag", tag); + EXPECT_EQ(expectedDict, paramDict); + + lua_close(luaState); +} + +TEST(syslog_parser, matching_regex_timestamp) { + json jList = json::array(); + vector regexList; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*message (.*) other_data (.*)"; + vector params = { "month", "day", "time", "message", "other_data" }; + vector luaCodes = { "", "", "", "", "" }; + regex expression(regexString); + + RegexStruct rs = RegexStruct(); + rs.tag = "test_tag"; + rs.regexExpression = expression; + rs.params = createEventParams(params, luaCodes); + regexList.push_back(rs); + + string tag; + event_params_t paramDict; + + event_params_t expectedDict; + expectedDict["message"] = "test_message"; + expectedDict["other_data"] = "test_data"; + expectedDict["timestamp"] = "2022-07-21T02:10:00.000000Z"; + + unique_ptr parser(new SyslogParser()); + parser->m_regexList = regexList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("Jul 21 02:10:00.000000 message test_message other_data test_data", tag, paramDict, luaState); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag", tag); + EXPECT_EQ(expectedDict, paramDict); + + lua_close(luaState); +} + +TEST(syslog_parser, no_matching_regex) { + json jList = json::array(); + vector regexList; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*no match"; + vector params = { "month", "day", "time" }; + vector luaCodes = { "", "", "" }; + regex expression(regexString); + + RegexStruct rs = RegexStruct(); + rs.tag = "test_tag"; + rs.regexExpression = expression; + rs.params = createEventParams(params, luaCodes); + regexList.push_back(rs); + + string tag; + event_params_t paramDict; + + unique_ptr parser(new SyslogParser()); + parser->m_regexList = regexList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("Test Message", tag, paramDict, luaState); + EXPECT_EQ(false, success); + + lua_close(luaState); +} + +TEST(syslog_parser, lua_code_valid_1) { + json jList = json::array(); + vector regexList; + string regexString = "^([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*.* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; + vector params = { "month", "day", "time", "is-sent", "ip", "major-code", 
"minor-code" }; + vector luaCodes = { "", "", "", "ret=tostring(arg==\"sent\")", "", "", "" }; + regex expression(regexString); + + RegexStruct rs = RegexStruct(); + rs.tag = "test_tag"; + rs.regexExpression = expression; + rs.params = createEventParams(params, luaCodes); + regexList.push_back(rs); + + string tag; + event_params_t paramDict; + + event_params_t expectedDict; + expectedDict["is-sent"] = "true"; + expectedDict["ip"] = "100.95.147.229"; + expectedDict["major-code"] = "2"; + expectedDict["minor-code"] = "2"; + + unique_ptr parser(new SyslogParser()); + parser->m_regexList = regexList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("NOTIFICATION: sent to neighbor 100.95.147.229 active 2/2 (peer in wrong AS) 2 bytes", tag, paramDict, luaState); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag", tag); + EXPECT_EQ(expectedDict, paramDict); + + lua_close(luaState); +} + +TEST(syslog_parser, lua_code_valid_2) { + json jList = json::array(); + vector regexList; + string regexString = "([a-zA-Z]{3})?\\s*([0-9]{1,2})?\\s*([0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{0,6})?\\s*.* (sent|received) (?:to|from) .* ([0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}.[0-9]{2,3}) active ([1-9]{1,3})/([1-9]{1,3}) .*"; + vector params = { "month", "day", "time", "is-sent", "ip", "major-code", "minor-code" }; + vector luaCodes = { "", "", "", "ret=tostring(arg==\"sent\")", "", "", "" }; + regex expression(regexString); + + RegexStruct rs = RegexStruct(); + rs.tag = "test_tag"; + rs.regexExpression = expression; + rs.params = createEventParams(params, luaCodes); + regexList.push_back(rs); + + string tag; + event_params_t paramDict; + + event_params_t expectedDict; + expectedDict["is-sent"] = "false"; + expectedDict["ip"] = "10.10.24.216"; + expectedDict["major-code"] = "6"; + expectedDict["minor-code"] = "2"; + expectedDict["timestamp"] = "2022-12-03T12:36:24.503424Z"; + + unique_ptr parser(new SyslogParser()); + parser->m_regexList = regexList; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + + bool success = parser->parseMessage("Dec 3 12:36:24.503424 NOTIFICATION: received from neighbor 10.10.24.216 active 6/2 (Administrative Shutdown) 0 bytes", tag, paramDict, luaState); + EXPECT_EQ(true, success); + EXPECT_EQ("test_tag", tag); + EXPECT_EQ(expectedDict, paramDict); + + lua_close(luaState); +} + +TEST(rsyslog_plugin, onInit_emptyJSON) { + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_1.rc.json")); + EXPECT_NE(0, plugin->onInit()); +} + +TEST(rsyslog_plugin, onInit_missingRegex) { + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_3.rc.json")); + EXPECT_NE(0, plugin->onInit()); +} + +TEST(rsyslog_plugin, onInit_invalidRegex) { + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_4.rc.json")); + EXPECT_NE(0, plugin->onInit()); +} + +TEST(rsyslog_plugin, onMessage) { + unique_ptr plugin(new RsyslogPlugin("test_mod_name", "./rsyslog_plugin_tests/test_regex_2.rc.json")); + EXPECT_EQ(0, plugin->onInit()); + ifstream infile("./rsyslog_plugin_tests/test_syslogs.txt"); + string logMessage; + bool parseResult; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + while(infile >> logMessage >> parseResult) { + EXPECT_EQ(parseResult, plugin->onMessage(logMessage, luaState)); + } + lua_close(luaState); + infile.close(); +} + +TEST(rsyslog_plugin, onMessage_noParams) { + unique_ptr plugin(new RsyslogPlugin("test_mod_name", 
"./rsyslog_plugin_tests/test_regex_5.rc.json")); + EXPECT_EQ(0, plugin->onInit()); + ifstream infile("./rsyslog_plugin_tests/test_syslogs_2.txt"); + string logMessage; + bool parseResult; + lua_State* luaState = luaL_newstate(); + luaL_openlibs(luaState); + while(infile >> logMessage >> parseResult) { + EXPECT_EQ(parseResult, plugin->onMessage(logMessage, luaState)); + } + lua_close(luaState); + infile.close(); +} + +TEST(timestampFormatter, changeTimestampFormat) { + unique_ptr formatter(new TimestampFormatter()); + + vector timestampOne = { "Jul", "20", "10:09:40.230874" }; + vector timestampTwo = { "Jan", "1", "00:00:00.000000" }; + vector timestampThree = { "Dec", "31", "23:59:59.000000" }; + + string formattedTimestampOne = formatter->changeTimestampFormat(timestampOne); + EXPECT_EQ("2022-07-20T10:09:40.230874Z", formattedTimestampOne); + + EXPECT_EQ("072010:09:40.230874", formatter->m_storedTimestamp); + + string formattedTimestampTwo = formatter->changeTimestampFormat(timestampTwo); + EXPECT_EQ("2022-01-01T00:00:00.000000Z", formattedTimestampTwo); + + formatter->m_storedTimestamp = "010100:00:00.000000"; + formatter->m_storedYear = "2025"; + + string formattedTimestampThree = formatter->changeTimestampFormat(timestampThree); + EXPECT_EQ("2025-12-31T23:59:59.000000Z", formattedTimestampThree); +} + +int main(int argc, char* argv[]) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk b/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk new file mode 100644 index 000000000000..6be7ef09786a --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/subdir.mk @@ -0,0 +1,12 @@ +CC := g++ + +RSYSLOG-PLUGIN-TEST_OBJS += ./rsyslog_plugin_tests/rsyslog_plugin_ut.o + +C_DEPS += ./rsyslog_plugin_tests/rsyslog_plugin_ut.d + +rsyslog_plugin_tests/%.o: rsyslog_plugin_tests/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst rsyslog_plugin_tests/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" + @echo 'Finished building: $<' + @echo ' ' diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_1.rc.json new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json new file mode 100644 index 000000000000..66788d326331 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_2.rc.json @@ -0,0 +1,7 @@ +[ + { + "tag": "bgp-state", + "regex": ".* %ADJCHANGE: neighbor (.*) (Up|Down) .*", + "params": ["neighbor_ip", "state" ] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json new file mode 100644 index 000000000000..2e67e88f8448 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_3.rc.json @@ -0,0 +1,6 @@ +[ + { + "tag": "TEST-TAG-NO-REGEX", + "param": [] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json new file mode 100644 index 000000000000..c3a875aded0f --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_4.rc.json @@ -0,0 +1,7 @@ +[ + { + "tag": "TEST-TAG-INVALID-REGEX", + "regex": "+++ ++++(", + "params": [] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json 
b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json new file mode 100644 index 000000000000..ddaf37c931a8 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_regex_5.rc.json @@ -0,0 +1,7 @@ +[ + { + "tag": "test_tag", + "regex": ".*", + "params": [] + } +] diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt new file mode 100644 index 000000000000..78f89aec3d28 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs.txt @@ -0,0 +1,4 @@ +"Aug 17 02:39:21.286611 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %ADJCHANGE: neighbor 100.126.188.90 Down Neighbor deleted" true +"Aug 17 02:46:42.615668 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %ADJCHANGE: neighbor 100.126.188.90 Up" true +"Aug 17 04:46:51.290979 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %ADJCHANGE: neighbor 100.126.188.78 Down Neighbor deleted" true +"Aug 17 04:46:51.290979 SN6-0101-0114-02T0 INFO bgp#bgpd[62]: %NOEVENT: no event" false diff --git a/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt new file mode 100644 index 000000000000..d56615f61681 --- /dev/null +++ b/src/sonic-eventd/rsyslog_plugin_tests/test_syslogs_2.txt @@ -0,0 +1,3 @@ +testMessage true +another_test_message true + true diff --git a/src/sonic-eventd/src/eventd.cpp b/src/sonic-eventd/src/eventd.cpp new file mode 100644 index 000000000000..1ff9dd8be20b --- /dev/null +++ b/src/sonic-eventd/src/eventd.cpp @@ -0,0 +1,798 @@ +#include +#include "eventd.h" +#include "dbconnector.h" + +/* + * There are 5 threads, including the main + * + * (0) main thread -- Runs eventd service that accepts commands event_req_type_t + * This can be used to control caching events and a no-op echo service. + * + * (1) capture/cache service + * Saves all the events between cache start & stop. + * Update missed cached counter in memory. + * + * (2) Main proxy service that runs XSUB/XPUB ends + * + * (3) Get stats for total published counter in memory. This thread also sends + * heartbeat message. It accomplishes by counting upon receive missed due + * to event receive timeout. + * + * (4) Thread to update counters from memory to redis periodically. 
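+ *
+ * A rough sketch of the message path these threads implement
+ * (see eventd_proxy::init below):
+ *
+ *   publishers -> XSUB --(zmq_proxy)--> XPUB -> subscribers
+ *                           |
+ *                           +--> PUB (capture tap) -> capture/cache service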
+ * + */ + +using namespace std; +using namespace swss; + +#define MB(N) ((N) * 1024 * 1024) +#define EVT_SIZE_AVG 150 + +#define MAX_CACHE_SIZE (MB(100) / (EVT_SIZE_AVG)) + +/* Count of elements returned in each read */ +#define READ_SET_SIZE 100 + +#define VEC_SIZE(p) ((int)p.size()) + +/* Sock read timeout in milliseconds, to enable look for control signals */ +#define CAPTURE_SOCK_TIMEOUT 800 + +#define HEARTBEAT_INTERVAL_SECS 2 /* Default: 2 seconds */ + +/* Source & tag for heartbeat events */ +#define EVENTD_PUBLISHER_SOURCE "sonic-events-eventd" +#define EVENTD_HEARTBEAT_TAG "heartbeat" + + +const char *counter_keys[COUNTERS_EVENTS_TOTAL] = { + COUNTERS_EVENTS_PUBLISHED, + COUNTERS_EVENTS_MISSED_CACHE +}; + +static bool s_unit_testing = false; + +int +eventd_proxy::init() +{ + int ret = -1, rc = 0; + SWSS_LOG_INFO("Start xpub/xsub proxy"); + + m_frontend = zmq_socket(m_ctx, ZMQ_XSUB); + RET_ON_ERR(m_frontend != NULL, "failing to get ZMQ_XSUB socket"); + + rc = zmq_bind(m_frontend, get_config(string(XSUB_END_KEY)).c_str()); + RET_ON_ERR(rc == 0, "Failing to bind XSUB to %s", get_config(string(XSUB_END_KEY)).c_str()); + + m_backend = zmq_socket(m_ctx, ZMQ_XPUB); + RET_ON_ERR(m_backend != NULL, "failing to get ZMQ_XPUB socket"); + + rc = zmq_bind(m_backend, get_config(string(XPUB_END_KEY)).c_str()); + RET_ON_ERR(rc == 0, "Failing to bind XPUB to %s", get_config(string(XPUB_END_KEY)).c_str()); + + m_capture = zmq_socket(m_ctx, ZMQ_PUB); + RET_ON_ERR(m_capture != NULL, "failing to get ZMQ_PUB socket for capture"); + + rc = zmq_bind(m_capture, get_config(string(CAPTURE_END_KEY)).c_str()); + RET_ON_ERR(rc == 0, "Failing to bind capture PUB to %s", get_config(string(CAPTURE_END_KEY)).c_str()); + + m_thr = thread(&eventd_proxy::run, this); + ret = 0; +out: + return ret; +} + +void +eventd_proxy::run() +{ + SWSS_LOG_INFO("Running xpub/xsub proxy"); + + /* runs forever until zmq context is terminated */ + zmq_proxy(m_frontend, m_backend, m_capture); + + SWSS_LOG_INFO("Stopped xpub/xsub proxy"); +} + + +stats_collector::stats_collector() : + m_shutdown(false), m_pause_heartbeat(false), m_heartbeats_published(0), + m_heartbeats_interval_cnt(0) +{ + set_heartbeat_interval(HEARTBEAT_INTERVAL_SECS); + for (int i=0; i < COUNTERS_EVENTS_TOTAL; ++i) { + m_lst_counters[i] = 0; + } + m_updated = false; +} + + +void +stats_collector::set_heartbeat_interval(int val) +{ + if (val > 0) { + /* Round to highest possible multiples of MIN */ + m_heartbeats_interval_cnt = + (((val * 1000) + STATS_HEARTBEAT_MIN - 1) / STATS_HEARTBEAT_MIN); + } + else if (val == 0) { + /* Least possible */ + m_heartbeats_interval_cnt = 1; + } + else if (val == -1) { + /* Turn off heartbeat */ + m_heartbeats_interval_cnt = 0; + SWSS_LOG_INFO("Heartbeat turned OFF"); + } + /* Any other value is ignored as invalid */ + + SWSS_LOG_INFO("Set heartbeat: val=%d secs cnt=%d min=%d ms final=%d secs", + val, m_heartbeats_interval_cnt, STATS_HEARTBEAT_MIN, + (m_heartbeats_interval_cnt * STATS_HEARTBEAT_MIN / 1000)); +} + + +int +stats_collector::get_heartbeat_interval() +{ + return m_heartbeats_interval_cnt * STATS_HEARTBEAT_MIN / 1000; +} + +int +stats_collector::start() +{ + int rc = -1; + + if (!s_unit_testing) { + try { + m_counters_db = make_shared("COUNTERS_DB", 0, true); + } + catch (exception &e) + { + SWSS_LOG_ERROR("Unable to get DB Connector, e=(%s)\n", e.what()); + } + RET_ON_ERR(m_counters_db != NULL, "Failed to get COUNTERS_DB"); + + m_stats_table = make_shared( + m_counters_db.get(), COUNTERS_EVENTS_TABLE); + 
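+        /*
+         * Counters are accumulated in memory by the collector thread and
+         * flushed to this COUNTERS_EVENTS table by the writer thread only
+         * when something actually changed (see run_writer below).
+         */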
RET_ON_ERR(m_stats_table != NULL, "Failed to get events table"); + + m_thr_writer = thread(&stats_collector::run_writer, this); + } + m_thr_collector = thread(&stats_collector::run_collector, this); + rc = 0; +out: + return rc; +} + +void +stats_collector::run_writer() +{ + while (true) { + if (m_updated.exchange(false)) { + /* Update if there had been any update */ + + for (int i = 0; i < COUNTERS_EVENTS_TOTAL; ++i) { + vector fv; + + fv.emplace_back(EVENTS_STATS_FIELD_NAME, to_string(m_lst_counters[i])); + + m_stats_table->set(counter_keys[i], fv); + } + } + if (m_shutdown) { + break; + } + this_thread::sleep_for(chrono::milliseconds(10)); + /* + * After sleep always do an update if needed before checking + * shutdown flag, as any counters collected during sleep + * needs to be updated. + */ + } + + m_stats_table.reset(); + m_counters_db.reset(); +} + +void +stats_collector::run_collector() +{ + int hb_cntr = 0; + string hb_key = string(EVENTD_PUBLISHER_SOURCE) + ":" + EVENTD_HEARTBEAT_TAG; + event_handle_t pub_handle = NULL; + event_handle_t subs_handle = NULL; + + /* + * A subscriber is required to set a subscription. Else all published + * events will be dropped at the point of publishing itself. + */ + pub_handle = events_init_publisher(EVENTD_PUBLISHER_SOURCE); + RET_ON_ERR(pub_handle != NULL, + "failed to create publisher handle for heartbeats"); + + subs_handle = events_init_subscriber(false, STATS_HEARTBEAT_MIN); + RET_ON_ERR(subs_handle != NULL, "failed to subscribe to all"); + + /* + * Though we can count off of capture socket, then we need to duplicate + * code in event_receive which has the logic to count all missed per + * runtime id. It also has logic to retire closed runtime IDs. + * + * So use regular subscriber API w/o cache but timeout to enable + * exit, upon shutdown. + */ + /* + * The collector service runs until shutdown. + * The only task is to update total_published & total_missed_internal. + * The write of these counters into redis is done by another thread. + */ + + while(!m_shutdown) { + event_receive_op_t op; + int rc = 0; + + try { + rc = event_receive(subs_handle, op); + } + catch (exception& e) + { + rc = -1; + stringstream ss; + ss << e.what(); + SWSS_LOG_ERROR("Receive event failed with %s", ss.str().c_str()); + } + + if ((rc == 0) && (op.key != hb_key)) { + /* TODO: Discount EVENT_STR_CTRL_DEINIT messages too */ + increment_published(1+op.missed_cnt); + + /* reset counter on receive to restart. */ + hb_cntr = 0; + } + else { + if (rc < 0) { + SWSS_LOG_ERROR( + "event_receive failed with rc=%d; stats:published(%lu)", rc, + m_lst_counters[INDEX_COUNTERS_EVENTS_PUBLISHED]); + } + if (!m_pause_heartbeat && (m_heartbeats_interval_cnt > 0) && + ++hb_cntr >= m_heartbeats_interval_cnt) { + rc = event_publish(pub_handle, EVENTD_HEARTBEAT_TAG); + if (rc != 0) { + SWSS_LOG_ERROR("Failed to publish heartbeat rc=%d", rc); + } + hb_cntr = 0; + ++m_heartbeats_published; + } + } + } + +out: + /* + * NOTE: A shutdown could lose messages in cache. + * But consider, that eventd shutdown is a critical shutdown as it would + * bring down all other features. Hence done only at system level shutdown, + * hence losing few messages in flight is acceptable. Any more complex code + * to handle is unwanted. 
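+     *
+     * For reference: the heartbeats published by this thread carry the key
+     * "sonic-events-eventd:heartbeat" (EVENTD_PUBLISHER_SOURCE ":" EVENTD_HEARTBEAT_TAG),
+     * which is also the key the receive loop above excludes from the
+     * published-events count.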
+     */
+
+    events_deinit_subscriber(subs_handle);
+    events_deinit_publisher(pub_handle);
+    m_shutdown = true;
+}
+
+capture_service::~capture_service()
+{
+    stop_capture();
+}
+
+void
+capture_service::stop_capture()
+{
+    m_ctrl = STOP_CAPTURE;
+
+    if (m_thr.joinable()) {
+        m_thr.join();
+    }
+}
+
+static bool
+validate_event(const internal_event_t &event, runtime_id_t &rid, sequence_t &seq)
+{
+    bool ret = false;
+
+    internal_event_t::const_iterator itc_r, itc_s, itc_e;
+    itc_r = event.find(EVENT_RUNTIME_ID);
+    itc_s = event.find(EVENT_SEQUENCE);
+    itc_e = event.find(EVENT_STR_DATA);
+
+    if ((itc_r != event.end()) && (itc_s != event.end()) && (itc_e != event.end())) {
+        ret = true;
+        rid = itc_r->second;
+        seq = str_to_seq(itc_s->second);
+    }
+    else {
+        SWSS_LOG_ERROR("Invalid evt: %s", map_to_str(event).c_str());
+    }
+
+    return ret;
+}
+
+
+/*
+ * Initialize cache with the set of events provided.
+ * Events read by the cache service will be appended.
+ */
+void
+capture_service::init_capture_cache(const event_serialized_lst_t &lst)
+{
+    /* Cache given events as initial stock.
+     * Save runtime ID with last seen seq to avoid duplicates, while reading
+     * from capture socket.
+     * No check for max cache size here, as most likely not needed.
+     */
+    for (event_serialized_lst_t::const_iterator itc = lst.begin(); itc != lst.end(); ++itc) {
+        internal_event_t event;
+
+        if (deserialize(*itc, event) == 0) {
+            runtime_id_t rid;
+            sequence_t seq;
+
+            if (validate_event(event, rid, seq)) {
+                m_pre_exist_id[rid] = seq;
+                m_events.push_back(*itc);
+            }
+        }
+    }
+}
+
+
+void
+capture_service::do_capture()
+{
+    int rc;
+    int block_ms=CAPTURE_SOCK_TIMEOUT;
+    int init_cnt;
+    void *cap_sub_sock = NULL;
+    counters_t total_overflow = 0;
+
+    typedef enum {
+        /*
+         * In this state every event read is compared with the init cache given.
+         * Only new events are saved.
+         */
+        CAP_STATE_INIT = 0,
+
+        /* In this state, all events read are cached until max limit */
+        CAP_STATE_ACTIVE,
+
+        /* Cache has hit max. Hence only save the last event for each runtime ID */
+        CAP_STATE_LAST
+    } cap_state_t;
+
+    cap_state_t cap_state = CAP_STATE_INIT;
+
+    /*
+     * Need a subscription for publishers to publish.
+     * The stats collector service already has an active subscriber for all.
+     */
+
+    cap_sub_sock = zmq_socket(m_ctx, ZMQ_SUB);
+    RET_ON_ERR(cap_sub_sock != NULL, "Failing to get ZMQ_SUB socket");
+
+    rc = zmq_connect(cap_sub_sock, get_config(string(CAPTURE_END_KEY)).c_str());
+    RET_ON_ERR(rc == 0, "Failing to connect capture SUB to %s", get_config(string(CAPTURE_END_KEY)).c_str());
+
+    rc = zmq_setsockopt(cap_sub_sock, ZMQ_SUBSCRIBE, "", 0);
+    RET_ON_ERR(rc == 0, "Failing to ZMQ_SUBSCRIBE");
+
+    rc = zmq_setsockopt(cap_sub_sock, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms));
+    RET_ON_ERR(rc == 0, "Failed to set ZMQ_RCVTIMEO to %d", block_ms);
+
+    m_cap_run = true;
+
+    while (m_ctrl != START_CAPTURE) {
+        /* Wait for capture start */
+        this_thread::sleep_for(chrono::milliseconds(10));
+    }
+
+    /*
+     * The cache service connects but defers any reading until the caller provides
+     * the startup cache. But all events that arrived since connect, though not yet read,
+     * will be held by ZMQ in its local cache.
+     *
+     * When the cache service starts reading, check against the initial stock for duplicates.
+     * m_pre_exist_id caches the last seq number in the initial stock for each runtime id.
+     * So only allow sequence numbers greater than the cached number.
+     *
+     * Theoretically all the events provided via the initial stock could be duplicates.
+     * Hence do this check until as many events as in the initial stock are seen,
+     * or until the cached id map is empty.
+     */
+    init_cnt = (int)m_events.size();
+
+    /* Read until STOP_CAPTURE */
+    while(m_ctrl == START_CAPTURE) {
+        runtime_id_t rid;
+        sequence_t seq;
+        internal_event_t event;
+        string source, evt_str;
+
+        if ((rc = zmq_message_read(cap_sub_sock, 0, source, event)) != 0) {
+            /*
+             * The capture socket captures SUBSCRIBE requests too.
+             * The message could contain subscribe filter strings and binary code.
+             * An empty string with binary code will fail to deserialize;
+             * else it would fail event validation.
+             */
+            RET_ON_ERR((rc == EAGAIN) || (rc == ERR_MESSAGE_INVALID),
+                    "0:Failed to read from capture socket");
+            continue;
+        }
+        if (!validate_event(event, rid, seq)) {
+            continue;
+        }
+        serialize(event, evt_str);
+
+        switch(cap_state) {
+        case CAP_STATE_INIT:
+            /*
+             * In this state, check each event against the init cache for duplicates.
+             * Whether a duplicate or a new one is seen, remove the entry from the pre-exist map.
+             * Stay in this state until the pre-exist cache is empty or as many
+             * messages as in the cache are seen, as in the worst case, even if you
+             * see a duplicate of each, it will end within the first m_events.size() reads.
+             */
+            {
+                bool add = true;
+                init_cnt--;
+                pre_exist_id_t::iterator it = m_pre_exist_id.find(rid);
+
+                if (it != m_pre_exist_id.end()) {
+                    if (seq <= it->second) {
+                        /* Duplicate; later/same seq in cache. */
+                        add = false;
+                    }
+                    if (seq >= it->second) {
+                        /* new one; this runtime ID need not be checked again */
+                        m_pre_exist_id.erase(it);
+                    }
+                }
+                if (add) {
+                    m_events.push_back(evt_str);
+                }
+            }
+            if(m_pre_exist_id.empty() || (init_cnt <= 0)) {
+                /* Init check is no longer needed. */
+                pre_exist_id_t().swap(m_pre_exist_id);
+                cap_state = CAP_STATE_ACTIVE;
+            }
+            break;
+
+        case CAP_STATE_ACTIVE:
+            /* Save until max allowed */
+            try
+            {
+                m_events.push_back(evt_str);
+                if (VEC_SIZE(m_events) >= m_cache_max) {
+                    cap_state = CAP_STATE_LAST;
+                    /* Clear the map, created earlier to ensure memory space is available */
+                    m_last_events.clear();
+                    m_last_events_init = true;
+                }
+                break;
+            }
+            catch (bad_alloc& e)
+            {
+                stringstream ss;
+                ss << e.what();
+                SWSS_LOG_ERROR("Cache save event failed with %s events:size=%d",
+                        ss.str().c_str(), VEC_SIZE(m_events));
+                cap_state = CAP_STATE_LAST;
+                // fall through to save this event in the last set.
+            }
+
+        case CAP_STATE_LAST:
+            total_overflow++;
+            m_last_events[rid] = evt_str;
+            if (total_overflow > m_last_events.size()) {
+                m_total_missed_cache++;
+                m_stats_instance->increment_missed_cache(1);
+            }
+            break;
+        }
+    }
+
+out:
+    /*
+     * Capture stop will close the socket, which fails the read
+     * and hence bails out.
+     */
+    zmq_close(cap_sub_sock);
+    m_cap_run = false;
+    return;
+}
+
+
+int
+capture_service::set_control(capture_control_t ctrl, event_serialized_lst_t *lst)
+{
+    int ret = -1;
+
+    /* Can go in single steps only. */
+    RET_ON_ERR((ctrl - m_ctrl) == 1, "m_ctrl(%d)+1 != ctrl(%d)", m_ctrl, ctrl);
+
+    switch(ctrl) {
+    case INIT_CAPTURE:
+        m_thr = thread(&capture_service::do_capture, this);
+        for(int i=0; !m_cap_run && (i < 100); ++i) {
+            /* Wait at most a second for the thread to init */
+            this_thread::sleep_for(chrono::milliseconds(10));
+        }
+        RET_ON_ERR(m_cap_run, "Failed to init capture");
+        m_ctrl = ctrl;
+        ret = 0;
+        break;
+
+    case START_CAPTURE:
+
+        /*
+         * Reserve MAX_PUBLISHERS_COUNT entries for last events, as we use it only
+         * upon m_events/vector overflow, which might block adding new entries in the map
+         * if overall mem consumption is too high.
Clearing the map just before use + * is likely to help. + */ + for (int i=0; iempty())) { + init_capture_cache(*lst); + } + m_ctrl = ctrl; + ret = 0; + break; + + + case STOP_CAPTURE: + /* + * Caller would have initiated SUBS channel. + * Read for CACHE_DRAIN_IN_MILLISECS to drain off cache + * before stopping. + */ + this_thread::sleep_for(chrono::milliseconds(CACHE_DRAIN_IN_MILLISECS)); + stop_capture(); + ret = 0; + break; + + default: + SWSS_LOG_ERROR("Unexpected code=%d", ctrl); + break; + } +out: + return ret; +} + +int +capture_service::read_cache(event_serialized_lst_t &lst_fifo, + last_events_t &lst_last, counters_t &overflow_cnt) +{ + lst_fifo.swap(m_events); + if (m_last_events_init) { + lst_last.swap(m_last_events); + } else { + last_events_t().swap(lst_last); + } + last_events_t().swap(m_last_events); + event_serialized_lst_t().swap(m_events); + overflow_cnt = m_total_missed_cache; + return 0; +} + +static int +process_options(stats_collector *stats, const event_serialized_lst_t &req_data, + event_serialized_lst_t &resp_data) +{ + int ret = -1; + if (!req_data.empty()) { + RET_ON_ERR(req_data.size() == 1, "Expect only one options string %d", + (int)req_data.size()); + const auto &data = nlohmann::json::parse(*(req_data.begin())); + RET_ON_ERR(data.size() == 1, "Only one supported option. Expect 1. size=%d", + (int)data.size()); + const auto it = data.find(GLOBAL_OPTION_HEARTBEAT); + RET_ON_ERR(it != data.end(), "Expect HEARTBEAT_INTERVAL; got %s", + data.begin().key().c_str()); + stats->set_heartbeat_interval(it.value()); + ret = 0; + } + else { + nlohmann::json msg = nlohmann::json::object(); + msg[GLOBAL_OPTION_HEARTBEAT] = stats->get_heartbeat_interval(); + resp_data.push_back(msg.dump()); + ret = 0; + } +out: + return ret; +} + + +void +run_eventd_service() +{ + int code = 0; + int cache_max; + event_service service; + stats_collector stats_instance; + eventd_proxy *proxy = NULL; + capture_service *capture = NULL; + + event_serialized_lst_t capture_fifo_events; + last_events_t capture_last_events; + + SWSS_LOG_INFO("Eventd service starting\n"); + + void *zctx = zmq_ctx_new(); + RET_ON_ERR(zctx != NULL, "Failed to get zmq ctx"); + + cache_max = get_config_data(string(CACHE_MAX_CNT), (int)MAX_CACHE_SIZE); + RET_ON_ERR(cache_max > 0, "Failed to get CACHE_MAX_CNT"); + + proxy = new eventd_proxy(zctx); + RET_ON_ERR(proxy != NULL, "Failed to create proxy"); + + RET_ON_ERR(proxy->init() == 0, "Failed to init proxy"); + + RET_ON_ERR(service.init_server(zctx) == 0, "Failed to init service"); + + RET_ON_ERR(stats_instance.start() == 0, "Failed to start stats collector"); + + /* Pause heartbeat during caching */ + stats_instance.heartbeat_ctrl(true); + + /* + * Start cache service, right upon eventd starts so as not to lose + * events until telemetry starts. 
+ * Telemetry will send a stop & collect cache upon startup + */ + capture = new capture_service(zctx, cache_max, &stats_instance); + RET_ON_ERR(capture->set_control(INIT_CAPTURE) == 0, "Failed to init capture"); + RET_ON_ERR(capture->set_control(START_CAPTURE) == 0, "Failed to start capture"); + + this_thread::sleep_for(chrono::milliseconds(200)); + RET_ON_ERR(stats_instance.is_running(), "Failed to start stats instance"); + + while(code != EVENT_EXIT) { + int resp = -1; + event_serialized_lst_t req_data, resp_data; + + RET_ON_ERR(service.channel_read(code, req_data) == 0, + "Failed to read request"); + + switch(code) { + case EVENT_CACHE_INIT: + /* connect only*/ + if (capture != NULL) { + delete capture; + } + event_serialized_lst_t().swap(capture_fifo_events); + last_events_t().swap(capture_last_events); + + capture = new capture_service(zctx, cache_max, &stats_instance); + if (capture != NULL) { + resp = capture->set_control(INIT_CAPTURE); + } + break; + + + case EVENT_CACHE_START: + if (capture == NULL) { + SWSS_LOG_ERROR("Cache is not initialized to start"); + resp = -1; + break; + } + /* Pause heartbeat during caching */ + stats_instance.heartbeat_ctrl(true); + + resp = capture->set_control(START_CAPTURE, &req_data); + break; + + + case EVENT_CACHE_STOP: + if (capture == NULL) { + SWSS_LOG_ERROR("Cache is not initialized to stop"); + resp = -1; + break; + } + resp = capture->set_control(STOP_CAPTURE); + if (resp == 0) { + counters_t overflow; + resp = capture->read_cache(capture_fifo_events, capture_last_events, + overflow); + } + delete capture; + capture = NULL; + + /* Unpause heartbeat upon stop caching */ + stats_instance.heartbeat_ctrl(); + break; + + + case EVENT_CACHE_READ: + if (capture != NULL) { + SWSS_LOG_ERROR("Cache is not stopped yet."); + resp = -1; + break; + } + resp = 0; + + if (capture_fifo_events.empty()) { + for (last_events_t::iterator it = capture_last_events.begin(); + it != capture_last_events.end(); ++it) { + capture_fifo_events.push_back(it->second); + } + last_events_t().swap(capture_last_events); + } + + { + int sz = VEC_SIZE(capture_fifo_events) < READ_SET_SIZE ? 
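+                /* hand out at most READ_SET_SIZE (100) entries per read request */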
+                    VEC_SIZE(capture_fifo_events) : READ_SET_SIZE;
+
+                if (sz != 0) {
+                    auto it = std::next(capture_fifo_events.begin(), sz);
+                    move(capture_fifo_events.begin(), it,
+                            back_inserter(resp_data));
+
+                    if (sz == VEC_SIZE(capture_fifo_events)) {
+                        event_serialized_lst_t().swap(capture_fifo_events);
+                    } else {
+                        capture_fifo_events.erase(capture_fifo_events.begin(), it);
+                    }
+                }
+            }
+            break;
+
+
+        case EVENT_ECHO:
+            resp = 0;
+            resp_data.swap(req_data);
+            break;
+
+        case EVENT_OPTIONS:
+            resp = process_options(&stats_instance, req_data, resp_data);
+            break;
+
+        case EVENT_EXIT:
+            resp = 0;
+            break;
+
+        default:
+            SWSS_LOG_ERROR("Unexpected request: %d", code);
+            assert(false);
+            break;
+        }
+        RET_ON_ERR(service.channel_write(resp, resp_data) == 0,
+                "Failed to write response back");
+    }
+out:
+    service.close_service();
+    stats_instance.stop();
+
+    if (proxy != NULL) {
+        delete proxy;
+    }
+    if (capture != NULL) {
+        delete capture;
+    }
+    if (zctx != NULL) {
+        zmq_ctx_term(zctx);
+    }
+    SWSS_LOG_ERROR("Eventd service exiting\n");
+}
+
+void set_unit_testing(bool b)
+{
+    s_unit_testing = b;
+}
+
+
diff --git a/src/sonic-eventd/src/eventd.h b/src/sonic-eventd/src/eventd.h
new file mode 100644
index 000000000000..8411223b35be
--- /dev/null
+++ b/src/sonic-eventd/src/eventd.h
@@ -0,0 +1,268 @@
+/*
+ * Header file for eventd daemon
+ */
+#include "table.h"
+#include "events_service.h"
+#include "events.h"
+#include "events_wrap.h"
+
+#define ARRAY_SIZE(l) (sizeof(l)/sizeof((l)[0]))
+
+typedef map<runtime_id_t, string> last_events_t;
+
+/* stat counters */
+typedef uint64_t counters_t;
+
+typedef enum {
+    INDEX_COUNTERS_EVENTS_PUBLISHED,
+    INDEX_COUNTERS_EVENTS_MISSED_CACHE,
+    COUNTERS_EVENTS_TOTAL
+} stats_counter_index_t;
+
+#define EVENTS_STATS_FIELD_NAME "value"
+#define STATS_HEARTBEAT_MIN 300
+
+/*
+ * Started by eventd_service.
+ * Creates XPUB & XSUB end points and binds them.
+ * Creates a PUB socket end point for capture and binds it.
+ * Calls the run_proxy method with the sockets in a dedicated thread.
+ * The thread runs forever until the zmq context is terminated.
+ */
+class eventd_proxy
+{
+    public:
+        eventd_proxy(void *ctx) : m_ctx(ctx), m_frontend(NULL), m_backend(NULL),
+            m_capture(NULL) {};
+
+        ~eventd_proxy() {
+            zmq_close(m_frontend);
+            zmq_close(m_backend);
+            zmq_close(m_capture);
+
+            if (m_thr.joinable())
+                m_thr.join();
+        }
+
+        int init();
+
+    private:
+        void run();
+
+        void *m_ctx;
+        void *m_frontend;
+        void *m_backend;
+        void *m_capture;
+        thread m_thr;
+};
+
+
+class stats_collector
+{
+    public:
+        stats_collector();
+
+        ~stats_collector() { stop(); }
+
+        int start();
+
+        void stop() {
+
+            m_shutdown = true;
+
+            if (m_thr_collector.joinable()) {
+                m_thr_collector.join();
+            }
+
+            if (m_thr_writer.joinable()) {
+                m_thr_writer.join();
+            }
+        }
+
+        void increment_published(counters_t val) {
+            _update_stats(INDEX_COUNTERS_EVENTS_PUBLISHED, val);
+        }
+
+        void increment_missed_cache(counters_t val) {
+            _update_stats(INDEX_COUNTERS_EVENTS_MISSED_CACHE, val);
+        }
+
+        counters_t read_counter(stats_counter_index_t index) {
+            if (index != COUNTERS_EVENTS_TOTAL) {
+                return m_lst_counters[index];
+            }
+            else {
+                return 0;
+            }
+        }
+
+        /* Sets heartbeat interval in seconds */
+        void set_heartbeat_interval(int val_in_secs);
+
+        /*
+         * Get heartbeat interval in seconds
+         * NOTE: Set & get values may not match, as the value is rounded
+         * to a multiple of the smallest possible interval.
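+         * e.g. with STATS_HEARTBEAT_MIN at 300 ms, set_heartbeat_interval(1)
+         * rounds up to 4 ticks (1200 ms), while get_heartbeat_interval()
+         * still reports 1 (second).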
+         */
+        int get_heartbeat_interval();
+
+        /* A way to pause heartbeat */
+        void heartbeat_ctrl(bool pause = false) {
+            m_pause_heartbeat = pause;
+            SWSS_LOG_INFO("Set heartbeat_ctrl pause=%d", pause);
+        }
+
+        uint64_t heartbeats_published() const {
+            return m_heartbeats_published;
+        }
+
+        bool is_running()
+        {
+            return !m_shutdown;
+        }
+
+    private:
+        void _update_stats(stats_counter_index_t index, counters_t val) {
+            if (index != COUNTERS_EVENTS_TOTAL) {
+                m_lst_counters[index] += val;
+                m_updated = true;
+            }
+            else {
+                SWSS_LOG_ERROR("Internal code error. Invalid index=%d", index);
+            }
+        }
+
+        void run_collector();
+
+        void run_writer();
+
+        atomic<bool> m_updated;
+
+        counters_t m_lst_counters[COUNTERS_EVENTS_TOTAL];
+
+        bool m_shutdown;
+
+        thread m_thr_collector;
+        thread m_thr_writer;
+
+        shared_ptr<swss::DBConnector> m_counters_db;
+        shared_ptr<swss::Table> m_stats_table;
+
+        bool m_pause_heartbeat;
+
+        uint64_t m_heartbeats_published;
+
+        int m_heartbeats_interval_cnt;
+};
+
+/*
+ * Capture/Cache service
+ *
+ * The service is started in a dedicated thread upon demand.
+ * It is controlled by the caller.
+ * On cache init, the thread is created.
+ * Upon create, it creates a SUB socket to the PUB end point of capture.
+ * The PUB end point is maintained by the zproxy service.
+ *
+ * On cache start, the thread is signalled to start reading.
+ *
+ * On cache stop, it is signalled to stop reading and exit. The caller waits
+ * for the thread to exit before starting to read cached data, to ensure
+ * that the data is not handled by two threads concurrently.
+ *
+ * This thread maintains its own copy of the cache. The reader does a swap
+ * after the thread exits.
+ * This thread ensures the cache is empty at init.
+ *
+ * Upon cache start, the thread is blocked in a receive call with timeout.
+ * Only upon receive/timeout would it notice a stop signal. Hence stop
+ * is not synchronous. The caller may wait for the thread to terminate
+ * via thread.join().
+ *
+ * Each event has 2 parts. The thread drops the first part, which is
+ * more for filtering events. It creates a string from the second part
+ * and saves it.
+ *
+ * The string is the serialized version of internal_event_ref.
+ *
+ * It keeps two sets of data:
+ * 1) List of all events received, in a vector, in the same order as received.
+ * 2) Map of the last event from each runtime id, filled upon list overflow of max size.
+ *
+ * We add to the vector as much as allowed by the vector and the max limit,
+ * whichever comes first.
+ *
+ * The sequence number in the internal event helps assess the missed count
+ * by the consumer of the cache data.
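+ *
+ * A small worked example of the two tiers, tracing the overflow accounting
+ * in do_capture(): with cache_max = 3 and five events e1..e5 from a single
+ * runtime id R, the vector holds [e1, e2, e3], the last-event map ends up
+ * as {R: e5}, and the missed-cache counter comes to 1, since e4 was
+ * overwritten before it could ever be read out.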
+ *
+ */
+typedef enum {
+    NEED_INIT = 0,
+    INIT_CAPTURE,
+    START_CAPTURE,
+    STOP_CAPTURE
+} capture_control_t;
+
+
+class capture_service
+{
+    public:
+        capture_service(void *ctx, int cache_max, stats_collector *stats) :
+            m_ctx(ctx), m_stats_instance(stats), m_cap_run(false),
+            m_ctrl(NEED_INIT), m_cache_max(cache_max),
+            m_last_events_init(false), m_total_missed_cache(0)
+        {}
+
+        ~capture_service();
+
+        int set_control(capture_control_t ctrl, event_serialized_lst_t *p=NULL);
+
+        int read_cache(event_serialized_lst_t &lst_fifo,
+                last_events_t &lst_last, counters_t &overflow_cnt);
+
+    private:
+        void init_capture_cache(const event_serialized_lst_t &lst);
+        void do_capture();
+
+        void stop_capture();
+
+        void *m_ctx;
+        stats_collector *m_stats_instance;
+
+        bool m_cap_run;
+        capture_control_t m_ctrl;
+        thread m_thr;
+
+        int m_cache_max;
+
+        event_serialized_lst_t m_events;
+
+        last_events_t m_last_events;
+        bool m_last_events_init;
+
+        typedef map<runtime_id_t, sequence_t> pre_exist_id_t;
+        pre_exist_id_t m_pre_exist_id;
+
+        counters_t m_total_missed_cache;
+
+};
+
+
+/*
+ * Main server that starts the zproxy service and honors
+ * eventd service requests (event_req_type_t).
+ *
+ * For echo, it just echoes.
+ *
+ * For cache start, it creates the SUB end of capture and kicks off the
+ * capture_events thread. Upon the cache stop command, it closes the handle,
+ * which will stop the caching thread with a read failure.
+ *
+ * For cache read, it returns the collected events in chunks.
+ *
+ */
+void run_eventd_service();
+
+/* To help skip redis access during unit testing */
+void set_unit_testing(bool b);
diff --git a/src/sonic-eventd/src/main.cpp b/src/sonic-eventd/src/main.cpp
new file mode 100644
index 000000000000..7a20497f0986
--- /dev/null
+++ b/src/sonic-eventd/src/main.cpp
@@ -0,0 +1,18 @@
+#include "logger.h"
+#include "eventd.h"
+
+void run_eventd_service();
+
+int main()
+{
+    swss::Logger::setMinPrio(swss::Logger::SWSS_DEBUG);
+    SWSS_LOG_INFO("The eventd service started");
+    SWSS_LOG_ERROR("ERR:The eventd service started");
+
+    run_eventd_service();
+
+    SWSS_LOG_INFO("The eventd service exited");
+
+    return 0;
+}
+
diff --git a/src/sonic-eventd/src/subdir.mk b/src/sonic-eventd/src/subdir.mk
new file mode 100644
index 000000000000..a1e2b55f8d13
--- /dev/null
+++ b/src/sonic-eventd/src/subdir.mk
@@ -0,0 +1,13 @@
+CC := g++
+
+TEST_OBJS += ./src/eventd.o
+OBJS += ./src/eventd.o ./src/main.o
+
+C_DEPS += ./src/eventd.d ./src/main.d
+
+src/%.o: src/%.cpp
+	@echo 'Building file: $<'
+	@echo 'Invoking: GCC C++ Compiler'
+	$(CC) -D__FILENAME__="$(subst src/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<"
+	@echo 'Finished building: $<'
+	@echo ' '
diff --git a/src/sonic-eventd/tests/eventd_ut.cpp b/src/sonic-eventd/tests/eventd_ut.cpp
new file mode 100644
index 000000000000..399255edb2b8
--- /dev/null
+++ b/src/sonic-eventd/tests/eventd_ut.cpp
@@ -0,0 +1,915 @@
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "gtest/gtest.h"
+#include "events_common.h"
+#include "events.h"
+#include "../src/eventd.h"
+
+using namespace std;
+using namespace swss;
+
+extern bool g_is_redis_available;
+extern const char *counter_keys[];
+
+typedef struct {
+    int id;
+    string source;
+    string tag;
+    string rid;
+    string seq;
+    event_params_t params;
+    int missed_cnt;
+} test_data_t;
+
+internal_event_t create_ev(const test_data_t &data)
+{
+    internal_event_t event_data;
+
+    event_data[EVENT_STR_DATA] = convert_to_json(
+            data.source + ":" + data.tag, data.params);
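+    /*
+     * An internal event carries three fields: the JSON payload built above,
+     * plus the publisher's runtime id and its per-publisher sequence number;
+     * these are the fields validate_event() in eventd.cpp checks for.
+     */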
event_data[EVENT_RUNTIME_ID] = data.rid; + event_data[EVENT_SEQUENCE] = data.seq; + + return event_data; +} + +/* Mock test data with event parameters and expected missed count */ +static const test_data_t ldata[] = { + { + 0, + "source0", + "tag0", + "guid-0", + "1", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 0 + }, + { + 1, + "source0", + "tag1", + "guid-1", + "100", + {{"ip", "10.10.27.10"}, {"state", "down"}}, + 0 + }, + { + 2, + "source1", + "tag2", + "guid-2", + "101", + {{"ip", "10.10.24.10"}, {"state", "down"}}, + 0 + }, + { + 3, + "source0", + "tag3", + "guid-1", + "105", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 4 + }, + { + 4, + "source0", + "tag4", + "guid-0", + "2", + {{"ip", "10.10.20.10"}, {"state", "down"}}, + 0 + }, + { + 5, + "source1", + "tag5", + "guid-2", + "110", + {{"ip", "10.10.24.10"}, {"state", "down"}}, + 8 + }, + { + 6, + "source0", + "tag0", + "guid-0", + "5", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 2 + }, + { + 7, + "source0", + "tag1", + "guid-1", + "106", + {{"ip", "10.10.27.10"}, {"state", "down"}}, + 0 + }, + { + 8, + "source1", + "tag2", + "guid-2", + "111", + {{"ip", "10.10.24.10"}, {"state", "down"}}, + 0 + }, + { + 9, + "source0", + "tag3", + "guid-1", + "109", + {{"ip", "10.10.10.10"}, {"state", "up"}}, + 2 + }, + { + 10, + "source0", + "tag4", + "guid-0", + "6", + {{"ip", "10.10.20.10"}, {"state", "down"}}, + 0 + }, + { + 11, + "source1", + "tag5", + "guid-2", + "119", + {{"ip", "10.10.24.10"}, {"state", "down"}}, + 7 + }, +}; + + +void run_cap(void *zctx, bool &term, string &read_source, + int &cnt) +{ + void *mock_cap = zmq_socket (zctx, ZMQ_SUB); + string source; + internal_event_t ev_int; + int block_ms = 200; + int i=0; + + EXPECT_TRUE(NULL != mock_cap); + EXPECT_EQ(0, zmq_connect(mock_cap, get_config(CAPTURE_END_KEY).c_str())); + EXPECT_EQ(0, zmq_setsockopt(mock_cap, ZMQ_SUBSCRIBE, "", 0)); + EXPECT_EQ(0, zmq_setsockopt(mock_cap, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); + + while(!term) { + string source; + internal_event_t ev_int; + + if (0 == zmq_message_read(mock_cap, 0, source, ev_int)) { + cnt = ++i; + } + } + zmq_close(mock_cap); +} + +void run_sub(void *zctx, bool &term, string &read_source, internal_events_lst_t &lst, + int &cnt) +{ + void *mock_sub = zmq_socket (zctx, ZMQ_SUB); + string source; + internal_event_t ev_int; + int block_ms = 200; + + EXPECT_TRUE(NULL != mock_sub); + EXPECT_EQ(0, zmq_connect(mock_sub, get_config(XPUB_END_KEY).c_str())); + EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_SUBSCRIBE, "", 0)); + EXPECT_EQ(0, zmq_setsockopt(mock_sub, ZMQ_RCVTIMEO, &block_ms, sizeof (block_ms))); + + while(!term) { + if (0 == zmq_message_read(mock_sub, 0, source, ev_int)) { + lst.push_back(ev_int); + read_source.swap(source); + cnt = (int)lst.size(); + } + } + + zmq_close(mock_sub); +} + +void *init_pub(void *zctx) +{ + void *mock_pub = zmq_socket (zctx, ZMQ_PUB); + EXPECT_TRUE(NULL != mock_pub); + EXPECT_EQ(0, zmq_connect(mock_pub, get_config(XSUB_END_KEY).c_str())); + + /* Provide time for async connect to complete */ + this_thread::sleep_for(chrono::milliseconds(200)); + + return mock_pub; +} + +void run_pub(void *mock_pub, const string wr_source, internal_events_lst_t &lst) +{ + for(internal_events_lst_t::const_iterator itc = lst.begin(); itc != lst.end(); ++itc) { + EXPECT_EQ(0, zmq_message_send(mock_pub, wr_source, *itc)); + } +} + + +TEST(eventd, proxy) +{ + printf("Proxy TEST started\n"); + bool term_sub = false; + bool term_cap = false; + string rd_csource, rd_source, wr_source("hello"); + 
+    internal_events_lst_t rd_evts, wr_evts;
+    int rd_evts_sz = 0, rd_cevts_sz = 0;
+    int wr_sz;
+
+    void *zctx = zmq_ctx_new();
+    EXPECT_TRUE(NULL != zctx);
+
+    eventd_proxy *pxy = new eventd_proxy(zctx);
+    EXPECT_TRUE(NULL != pxy);
+
+    /* Starting proxy */
+    EXPECT_EQ(0, pxy->init());
+
+    /* subscriber in a thread */
+    thread thr(&run_sub, zctx, ref(term_sub), ref(rd_source), ref(rd_evts), ref(rd_evts_sz));
+
+    /* capture in a thread */
+    thread thrc(&run_cap, zctx, ref(term_cap), ref(rd_csource), ref(rd_cevts_sz));
+
+    /* Init pub connection */
+    void *mock_pub = init_pub(zctx);
+
+    EXPECT_TRUE(5 < ARRAY_SIZE(ldata));
+
+    for(int i=0; i<5; ++i) {
+        wr_evts.push_back(create_ev(ldata[i]));
+    }
+
+    EXPECT_TRUE(rd_evts.empty());
+    EXPECT_TRUE(rd_source.empty());
+
+    /* Publish events. */
+    run_pub(mock_pub, wr_source, wr_evts);
+
+    wr_sz = (int)wr_evts.size();
+    for(int i=0; (wr_sz != rd_evts_sz) && (i < 100); ++i) {
+        /* Loop & wait for at most a second */
+        this_thread::sleep_for(chrono::milliseconds(10));
+    }
+    this_thread::sleep_for(chrono::milliseconds(1000));
+
+    delete pxy;
+    pxy = NULL;
+
+    term_sub = true;
+    term_cap = true;
+
+    thr.join();
+    thrc.join();
+    EXPECT_EQ(rd_evts.size(), wr_evts.size());
+    EXPECT_EQ(rd_cevts_sz, wr_evts.size());
+
+    zmq_close(mock_pub);
+    zmq_ctx_term(zctx);
+
+    /* Provide time for async proxy removal to complete */
+    this_thread::sleep_for(chrono::milliseconds(200));
+
+    printf("eventd_proxy is tested GOOD\n");
+}
+
+
+TEST(eventd, capture)
+{
+    printf("Capture TEST started\n");
+
+    bool term_sub = false;
+    string sub_source;
+    int sub_evts_sz = 0;
+    internal_events_lst_t sub_evts;
+    stats_collector stats_instance;
+
+    /* run_pub details */
+    string wr_source("hello");
+    internal_events_lst_t wr_evts;
+
+    /* capture related */
+    int init_cache = 3;                 /* provided along with start capture */
+    int cache_max = init_cache + 3;     /* capture service cache max */
+
+    /* startup strings; expected list & read list from capture */
+    event_serialized_lst_t evts_start, evts_expect, evts_read;
+    last_events_t last_evts_exp, last_evts_read;
+    counters_t overflow, overflow_exp = 0;
+
+    void *zctx = zmq_ctx_new();
+    EXPECT_TRUE(NULL != zctx);
+
+    /* Run the proxy; Capture service reads from proxy */
+    eventd_proxy *pxy = new eventd_proxy(zctx);
+    EXPECT_TRUE(NULL != pxy);
+
+    /* Starting proxy */
+    EXPECT_EQ(0, pxy->init());
+
+    /* Run subscriber; else the publisher will drop events on the floor, with no subscriber. */
+    thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz));
+
+    /* Create capture service */
+    capture_service *pcap = new capture_service(zctx, cache_max, &stats_instance);
+
+    /* STOP before INIT must fail; set_control only advances one step at a time */
+    EXPECT_EQ(-1, pcap->set_control(STOP_CAPTURE));
+
+    /* Initialize the capture */
+    EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE));
+
+    EXPECT_TRUE(init_cache > 1);
+    EXPECT_TRUE((cache_max+3) < (int)ARRAY_SIZE(ldata));
+
+    /* Collect a few serialized strings of events for startup cache */
+    for(int i=0; i < init_cache; ++i) {
+        internal_event_t ev(create_ev(ldata[i]));
+        string evt_str;
+        serialize(ev, evt_str);
+        evts_start.push_back(evt_str);
+        evts_expect.push_back(evt_str);
+    }
+
+    /*
+     * Collect events to publish for capture to cache,
+     * re-publishing some events already sent in the cache.
+     * Hence i=1, when the first init_cache events are already in the cache.
+     */
+    for(int i=1; i < (int)ARRAY_SIZE(ldata); ++i) {
+        internal_event_t ev(create_ev(ldata[i]));
+        string evt_str;
+
+        serialize(ev, evt_str);
+
+        wr_evts.push_back(ev);
+
+        if (i < cache_max) {
+            if (i >= init_cache) {
+                /* for i < init_cache, evts_expect is already populated */
+                evts_expect.push_back(evt_str);
+            }
+        } else {
+            /* collect last entries for overflow */
+            last_evts_exp[ldata[i].rid] = evt_str;
+            overflow_exp++;
+        }
+    }
+    overflow_exp -= (int)last_evts_exp.size();
+
+    EXPECT_EQ(0, pcap->set_control(START_CAPTURE, &evts_start));
+
+    /* Init pub connection */
+    void *mock_pub = init_pub(zctx);
+
+    /* Publish events from 1 to all. */
+    run_pub(mock_pub, wr_source, wr_evts);
+
+    /* Provide time for async message receive. */
+    this_thread::sleep_for(chrono::milliseconds(200));
+
+    /* Stop capture; closes the socket & terminates the thread */
+    EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE));
+
+    /* terminate subs thread */
+    term_sub = true;
+
+    /* Read the cache */
+    EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read, overflow));
+
+#ifdef DEBUG_TEST
+    if ((evts_read.size() != evts_expect.size()) ||
+            (last_evts_read.size() != last_evts_exp.size())) {
+        printf("size: sub_evts_sz=%d sub_evts=%d\n", sub_evts_sz, (int)sub_evts.size());
+        printf("init_cache=%d cache_max=%d\n", init_cache, cache_max);
+        printf("overflow=%lu overflow_exp=%lu\n", overflow, overflow_exp);
+        printf("evts_start=%d evts_expect=%d evts_read=%d\n",
+                (int)evts_start.size(), (int)evts_expect.size(), (int)evts_read.size());
+        printf("last_evts_exp=%d last_evts_read=%d\n", (int)last_evts_exp.size(),
+                (int)last_evts_read.size());
+    }
+#endif
+
+    EXPECT_EQ(evts_read.size(), evts_expect.size());
+    EXPECT_EQ(evts_read, evts_expect);
+    EXPECT_EQ(last_evts_read.size(), last_evts_exp.size());
+    EXPECT_EQ(last_evts_read, last_evts_exp);
+    EXPECT_EQ(overflow, overflow_exp);
+
+    delete pxy;
+    pxy = NULL;
+
+    delete pcap;
+    pcap = NULL;
+
+    thr_sub.join();
+
+    zmq_close(mock_pub);
+    zmq_ctx_term(zctx);
+
+    /* Provide time for async proxy removal to complete */
+    this_thread::sleep_for(chrono::milliseconds(200));
+
+    printf("Capture TEST completed\n");
+}
+
+TEST(eventd, captureCacheMax)
+{
+    printf("Capture TEST with matching cache-max started\n");
+
+    /*
+     * Need to run a subscriber; else the publisher would skip publishing
+     * in the absence of any subscriber.
+     */
+    bool term_sub = false;
+    string sub_source;
+    int sub_evts_sz = 0;
+    internal_events_lst_t sub_evts;
+    stats_collector stats_instance;
+
+    /* run_pub details */
+    string wr_source("hello");
+    internal_events_lst_t wr_evts;
+
+    /* capture related */
+    int init_cache = 4;                 /* provided along with start capture */
+    int cache_max = ARRAY_SIZE(ldata);  /* capture service cache max */
+
+    /* startup strings; expected list & read list from capture */
+    event_serialized_lst_t evts_start, evts_expect, evts_read;
+    last_events_t last_evts_read;
+    counters_t overflow;
+
+    void *zctx = zmq_ctx_new();
+    EXPECT_TRUE(NULL != zctx);
+
+    /* Run the proxy; Capture service reads from proxy */
+    eventd_proxy *pxy = new eventd_proxy(zctx);
+    EXPECT_TRUE(NULL != pxy);
+
+    /* Starting proxy */
+    EXPECT_EQ(0, pxy->init());
+
+    /* Run subscriber; else the publisher will drop events on the floor, with no subscriber. */
+    thread thr_sub(&run_sub, zctx, ref(term_sub), ref(sub_source), ref(sub_evts), ref(sub_evts_sz));
+
+    /* Create capture service */
+    capture_service *pcap = new capture_service(zctx, cache_max, &stats_instance);
+
+    /* STOP before INIT must fail; set_control only advances one step at a time */
+    EXPECT_EQ(-1, pcap->set_control(STOP_CAPTURE));
+
+    EXPECT_TRUE(init_cache > 1);
+
+    /* Collect a few serialized strings of events for startup cache */
+    for(int i=0; i < init_cache; ++i) {
+        internal_event_t ev(create_ev(ldata[i]));
+        string evt_str;
+        serialize(ev, evt_str);
+        evts_start.push_back(evt_str);
+        evts_expect.push_back(evt_str);
+    }
+
+    /*
+     * Collect events to publish for capture to cache,
+     * re-publishing some events already sent in the cache.
+     */
+    for(int i=1; i < (int)ARRAY_SIZE(ldata); ++i) {
+        internal_event_t ev(create_ev(ldata[i]));
+        string evt_str;
+
+        serialize(ev, evt_str);
+
+        wr_evts.push_back(ev);
+
+        if (i >= init_cache) {
+            /* for i < init_cache, evts_expect is already populated */
+            evts_expect.push_back(evt_str);
+        }
+    }
+
+    EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE));
+    EXPECT_EQ(0, pcap->set_control(START_CAPTURE, &evts_start));
+
+    /* Init pub connection */
+    void *mock_pub = init_pub(zctx);
+
+    /* Publish events from 1 to all. */
+    run_pub(mock_pub, wr_source, wr_evts);
+
+    /* Provide time for async message receive. */
+    this_thread::sleep_for(chrono::milliseconds(100));
+
+    /* Stop capture; closes the socket & terminates the thread */
+    EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE));
+
+    /* terminate subs thread */
+    term_sub = true;
+
+    /* Read the cache */
+    EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read, overflow));
+
+#ifdef DEBUG_TEST
+    if ((evts_read.size() != evts_expect.size()) ||
+            !last_evts_read.empty()) {
+        printf("size: sub_evts_sz=%d sub_evts=%d\n", sub_evts_sz, (int)sub_evts.size());
+        printf("init_cache=%d cache_max=%d\n", init_cache, cache_max);
+        printf("evts_start=%d evts_expect=%d evts_read=%d\n",
+                (int)evts_start.size(), (int)evts_expect.size(), (int)evts_read.size());
+        printf("last_evts_read=%d\n", (int)last_evts_read.size());
+        printf("overflow=%lu\n", overflow);
+    }
+#endif
+
+    EXPECT_EQ(evts_read, evts_expect);
+    EXPECT_TRUE(last_evts_read.empty());
+    EXPECT_EQ(overflow, 0);
+
+    delete pxy;
+    pxy = NULL;
+
+    delete pcap;
+    pcap = NULL;
+
+    thr_sub.join();
+
+    zmq_close(mock_pub);
+    zmq_ctx_term(zctx);
+
+    /* Provide time for async proxy removal to complete */
+    this_thread::sleep_for(chrono::milliseconds(200));
+
+    printf("Capture TEST with matching cache-max completed\n");
+}
+
+TEST(eventd, service)
+{
+    /*
+     * Don't PUB/SUB events, as the main run_eventd_service itself
+     * is using zmq_message_read. Any PUB/SUB would cause
+     * eventd's do_capture, running in another thread, to call
+     * zmq_message_read concurrently, which would crash as boost::archive is
+     * not thread safe.
+     * TEST(eventd, capture) has already tested caching.
+     */
+    printf("Service TEST started\n");
+
+    /* startup strings; expected list & read list from capture */
+    event_service service;
+
+    void *zctx = zmq_ctx_new();
+    EXPECT_TRUE(NULL != zctx);
+
+    /*
+     * Start the eventd server side service.
+     * It runs the proxy & the capture service.
+     * It uses its own zmq context.
+     * It starts to capture too.
+ */ + + if (!g_is_redis_available) { + set_unit_testing(true); + } + + thread thread_service(&run_eventd_service); + + /* Need client side service to interact with server side */ + EXPECT_EQ(0, service.init_client(zctx)); + + { + /* eventd_service starts cache too; Test this caching */ + /* Init pub connection */ + void *mock_pub = init_pub(zctx); + EXPECT_TRUE(NULL != mock_pub); + + internal_events_lst_t wr_evts; + int wr_sz = 2; + string wr_source("hello"); + + /* Test service startup caching */ + event_serialized_lst_t evts_start, evts_read; + + for(int i=0; i evts_start_int; + + EXPECT_TRUE(init_cache > 1); + + /* Collect few serailized strings of events for startup cache */ + for(int i=0; i < init_cache; ++i) { + internal_event_t ev(create_ev(ldata[i])); + string evt_str; + serialize(ev, evt_str); + evts_start.push_back(evt_str); + evts_start_int.push_back(ev); + } + + + EXPECT_EQ(0, service.cache_init()); + EXPECT_EQ(0, service.cache_start(evts_start)); + + this_thread::sleep_for(chrono::milliseconds(200)); + + /* Stop capture, closes socket & terminates the thread */ + EXPECT_EQ(0, service.cache_stop()); + + /* Read the cache */ + EXPECT_EQ(0, service.cache_read(evts_read)); + + if (evts_read != evts_start) { + vector evts_read_int; + + for (event_serialized_lst_t::const_iterator itc = evts_read.begin(); + itc != evts_read.end(); ++itc) { + internal_event_t event; + + if (deserialize(*itc, event) == 0) { + evts_read_int.push_back(event); + } + } + EXPECT_EQ(evts_read_int, evts_start_int); + } + } + + { + string set_opt_bad("{\"HEARTBEAT_INTERVAL\": 2000, \"OFFLINE_CACHE_SIZE\": 500}"); + string set_opt_good("{\"HEARTBEAT_INTERVAL\":5}"); + char buff[100]; + buff[0] = 0; + + EXPECT_EQ(-1, service.global_options_set(set_opt_bad.c_str())); + EXPECT_EQ(0, service.global_options_set(set_opt_good.c_str())); + EXPECT_LT(0, service.global_options_get(buff, sizeof(buff))); + + EXPECT_EQ(set_opt_good, string(buff)); + } + + EXPECT_EQ(0, service.send_recv(EVENT_EXIT)); + + service.close_service(); + + thread_service.join(); + + zmq_ctx_term(zctx); + printf("Service TEST completed\n"); +} + + +void +wait_for_heartbeat(stats_collector &stats_instance, long unsigned int cnt, + int wait_ms = 3000) +{ + int diff = 0; + + auto st = duration_cast(system_clock::now().time_since_epoch()).count(); + while (stats_instance.heartbeats_published() == cnt) { + auto en = duration_cast(system_clock::now().time_since_epoch()).count(); + diff = en - st; + if (diff > wait_ms) { + EXPECT_LE(diff, wait_ms); + EXPECT_EQ(cnt, stats_instance.heartbeats_published()); + break; + } + else { + stringstream ss; + ss << (en -st); + } + this_thread::sleep_for(chrono::milliseconds(300)); + } +} + +TEST(eventd, heartbeat) +{ + printf("heartbeat TEST started\n"); + + int rc; + long unsigned int cnt; + stats_collector stats_instance; + + if (!g_is_redis_available) { + set_unit_testing(true); + } + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + eventd_proxy *pxy = new eventd_proxy(zctx); + EXPECT_TRUE(NULL != pxy); + + /* Starting proxy */ + EXPECT_EQ(0, pxy->init()); + + rc = stats_instance.start(); + EXPECT_EQ(rc, 0); + + /* Wait for any non-zero heartbeat */ + wait_for_heartbeat(stats_instance, 0); + + /* Pause heartbeat */ + stats_instance.heartbeat_ctrl(true); + + /* Sleep to ensure the other thread noticed the pause request. 
*/ + this_thread::sleep_for(chrono::milliseconds(200)); + + /* Get current count */ + cnt = stats_instance.heartbeats_published(); + + /* Wait for 3 seconds with no new heartbeat */ + this_thread::sleep_for(chrono::seconds(3)); + + EXPECT_EQ(stats_instance.heartbeats_published(), cnt); + + /* Set interval as 1 second */ + stats_instance.set_heartbeat_interval(1); + + /* Turn on heartbeat */ + stats_instance.heartbeat_ctrl(); + + /* Wait for heartbeat count to change from last count */ + wait_for_heartbeat(stats_instance, cnt, 2000); + + stats_instance.stop(); + + delete pxy; + + zmq_ctx_term(zctx); + + printf("heartbeat TEST completed\n"); +} + + +TEST(eventd, testDB) +{ + printf("DB TEST started\n"); + + /* consts used */ + const int pub_count = 7; + const int cache_max = 3; + + stats_collector stats_instance; + event_handle_t pub_handle; + event_serialized_lst_t evts_read; + last_events_t last_evts_read; + counters_t overflow; + string tag; + + if (!g_is_redis_available) { + printf("redis not available; Hence DB TEST skipped\n"); + return; + } + + EXPECT_LT(cache_max, pub_count); + DBConnector db("COUNTERS_DB", 0, true); + + + /* Not testing heartbeat; Hence set high val as 10 seconds */ + stats_instance.set_heartbeat_interval(10000); + + /* Start instance to capture published count & as well writes to DB */ + EXPECT_EQ(0, stats_instance.start()); + + void *zctx = zmq_ctx_new(); + EXPECT_TRUE(NULL != zctx); + + /* Run proxy to enable receive as capture test needs to receive */ + eventd_proxy *pxy = new eventd_proxy(zctx); + EXPECT_TRUE(NULL != pxy); + + /* Starting proxy */ + EXPECT_EQ(0, pxy->init()); + + /* Create capture service */ + capture_service *pcap = new capture_service(zctx, cache_max, &stats_instance); + + /* Initialize the capture */ + EXPECT_EQ(0, pcap->set_control(INIT_CAPTURE)); + + /* Kick off capture */ + EXPECT_EQ(0, pcap->set_control(START_CAPTURE)); + + pub_handle = events_init_publisher("test_db"); + + for(int i=0; i < pub_count; ++i) { + tag = string("test_db_tag_") + to_string(i); + event_publish(pub_handle, tag); + } + + /* Pause to ensure all published events did reach capture service */ + this_thread::sleep_for(chrono::milliseconds(200)); + + EXPECT_EQ(0, pcap->set_control(STOP_CAPTURE)); + + /* Read the cache */ + EXPECT_EQ(0, pcap->read_cache(evts_read, last_evts_read, overflow)); + + /* + * Sent pub_count messages of different tags. + * Upon cache max, only one event per sender/runtime-id is saved. Hence + * expected last_evts_read is one. + * expected overflow = pub_count - cache_max - 1 + */ + + EXPECT_EQ(cache_max, (int)evts_read.size()); + EXPECT_EQ(1, (int)last_evts_read.size()); + EXPECT_EQ((pub_count - cache_max - 1), overflow); + + EXPECT_EQ(pub_count, stats_instance.read_counter( + INDEX_COUNTERS_EVENTS_PUBLISHED)); + EXPECT_EQ((pub_count - cache_max - 1), stats_instance.read_counter( + INDEX_COUNTERS_EVENTS_MISSED_CACHE)); + + events_deinit_publisher(pub_handle); + + for (int i=0; i < COUNTERS_EVENTS_TOTAL; ++i) { + string key = string("COUNTERS_EVENTS:") + counter_keys[i]; + unordered_map<string, string> m; + bool key_found = false, val_found=false, val_match=false; + + if (db.exists(key)) { + try { + m = db.hgetall(key); + unordered_map<string, string>::const_iterator itc = + m.find(string(EVENTS_STATS_FIELD_NAME)); + if (itc != m.end()) { + int expect = (counter_keys[i] == string(COUNTERS_EVENTS_PUBLISHED) ? + pub_count : (pub_count - cache_max - 1)); + val_match = (expect == stoi(itc->second) ?
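+ /* With pub_count=7 and cache_max=3 above, expect is 7 for the + * published counter and 3 for the missed-cache counter. */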
true : false); + val_found = true; + } + } + catch (exception &e) + { + printf("Failed to get key=(%s) err=(%s)", key.c_str(), e.what()); + EXPECT_TRUE(false); + } + key_found = true; + } + + if (!val_match) { + printf("key=%s key_found=%d val_found=%d fields=%d", + key.c_str(), key_found, val_found, (int)m.size()); + + printf("hgetall BEGIN key=%s", key.c_str()); + for(unordered_map::const_iterator itc = m.begin(); + itc != m.end(); ++itc) { + printf("val[%s] = (%s)", itc->first.c_str(), itc->second.c_str()); + } + printf("hgetall END\n"); + EXPECT_TRUE(false); + } + } + + stats_instance.stop(); + + delete pxy; + delete pcap; + + zmq_ctx_term(zctx); + + printf("DB TEST completed\n"); +} + + +// TODO -- Add unit tests for stats diff --git a/src/sonic-eventd/tests/main.cpp b/src/sonic-eventd/tests/main.cpp new file mode 100644 index 000000000000..4b869e8c3004 --- /dev/null +++ b/src/sonic-eventd/tests/main.cpp @@ -0,0 +1,97 @@ +#include "gtest/gtest.h" +#include "dbconnector.h" +#include + +using namespace std; +using namespace swss; + +string existing_file = "./tests/redis_multi_db_ut_config/database_config.json"; +string nonexisting_file = "./tests/redis_multi_db_ut_config/database_config_nonexisting.json"; +string global_existing_file = "./tests/redis_multi_db_ut_config/database_global.json"; + +#define TEST_DB "APPL_DB" +#define TEST_NAMESPACE "asic0" +#define INVALID_NAMESPACE "invalid" + +bool g_is_redis_available = false; + +class SwsscommonEnvironment : public ::testing::Environment { +public: + // Override this to define how to set up the environment + void SetUp() override { + // by default , init should be false + cout<<"Default : isInit = "<:tag\n") + return sourceTag + +def getFVMFromParams(params): + param_dict = FieldValueMap() + for key, value in params.items(): + key = str(key) + value = str(value) + param_dict[key] = value + return param_dict + +def publishEvents(line, publisher_handle): + try: + json_dict = json.loads(line) + except Exception as ex: + logging.error("JSON string not able to be parsed\n") + return + if not json_dict or len(json_dict) != 1: + logging.error("JSON string not able to be parsed\n") + return + sourceTag = list(json_dict)[0] + params = list(json_dict.values())[0] + tag = getTag(sourceTag) + param_dict = getFVMFromParams(params) + if param_dict: + event_publish(publisher_handle, tag, param_dict) + +def publishEventsFromFile(publisher_handle, infile, count, pause): + try: + with open(infile, 'r') as f: + for line in f.readlines(): + line.rstrip() + publishEvents(line, publisher_handle) + time.sleep(pause) + except Exception as ex: + logging.error("Unable to open file from given path or has incorrect json format, gives exception {}\n".format(ex)) + logging.info("Switching to default bgp state publish events\n") + publishBGPEvents(publisher_handle, count, pause) + +def publishBGPEvents(publisher_handle, count, pause): + ip_addresses = [] + param_dict = FieldValueMap() + + for _ in range(count): + ip = str(ipaddress.IPv4Address(random.randint(0, 2 ** 32))) + ip_addresses.append(ip) + + # publish down events + for ip in ip_addresses: + param_dict["ip"] = ip + param_dict["status"] = "down" + event_publish(publisher_handle, "bgp-state", param_dict) + time.sleep(pause) + + # publish up events + for ip in ip_addresses: + param_dict["ip"] = ip + event_publish(publisher_handle, "bgp-state", param_dict) + time.sleep(pause) + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-s", "--source", nargs='?', const='test-event-source', 
default='test-event-source', help="Source of event, default is test-event-source") + parser.add_argument("-f", "--file", nargs='?', const='', default='', help="File containing json event strings, must be in format \'{\":foo\": {\"aaa\": \"AAA\", \"bbb\": \"BBB\"}}\'") + parser.add_argument("-c", "--count", nargs='?', type=int, const=10, default=10, help="Count of default bgp events to be generated") + parser.add_argument("-p", "--pause", nargs='?', type=float, const=0.0, default=0.0, help="Pause time wanted between each event, default is 0") + args = parser.parse_args() + publisher_handle = events_init_publisher(args.source) + if args.file == '': + publishBGPEvents(publisher_handle, args.count, args.pause) + else: + publishEventsFromFile(publisher_handle, args.file, args.count, args.pause) + +if __name__ == "__main__": + main() diff --git a/src/sonic-eventd/tools/events_tool.cpp b/src/sonic-eventd/tools/events_tool.cpp new file mode 100644 index 000000000000..97b17c1d7566 --- /dev/null +++ b/src/sonic-eventd/tools/events_tool.cpp @@ -0,0 +1,328 @@ +#include +#include +#include "events.h" +#include "events_common.h" + +/* + * Sample i/p file contents for send + * + * {"src_0:key-0": {"foo": "bar", "hello": "world" }} + * {"src_0:key-1": {"foo": "barXX", "hello": "world" }} + * + * Repeat the above line to increase entries. + * Each line is parsed independently, so no "," expected at the end. + */ + +#define ASSERT(res, m, ...) \ + if (!(res)) {\ + int _e = errno; \ + printf("Failed here %s:%d errno:%d zerrno:%d ", __FUNCTION__, __LINE__, _e, zmq_errno()); \ + printf(m, ##__VA_ARGS__); \ + printf("\n"); \ + exit(-1); } + + +typedef enum { + OP_INIT=0, + OP_SEND=1, + OP_RECV=2, + OP_SEND_RECV=3 //SEND|RECV +} op_t; + + +#define PRINT_CHUNK_SZ 2 + +/* + * Usage: + */ + +const char *s_usage = "\ +-s - To send\n\ +-r - To receive\n\ +Note:\n\ + when both -s & -r are given:\n\ + it uses the main thread to publish and forks a dedicated thread to receive.\n\ + The rest of the parameters except -w are used for send\n\ +\n\ +-n - Count of messages to send/receive. When both are given, it is used as the count to send\n\ + Default: 1 \n\ + A value of 0 implies unlimited\n\ +\n\ +-p - Count of milliseconds to pause between sends or receives. In send-recv mode, it only affects send.\n\ + Default: 0 implying no pause\n\ +\n\ + -i - List of JSON messages to send in a file, with each event/message\n\ + declared in a single line. When n is more than size of list, the list\n\ + is rotated upon completion.\n\ + e.g.
'[ \n\ + { \"sonic-bgp:bgp-state\": { \"ip\": \"10.101.01.10\", \"ts\": \"2022-10-11T01:02:30.45567\", \"state\": \"up\" }}\n\ + { \"abc-xxx:yyy-zz\": { \"foo\": \"bar\", \"hello\":\"world\", \"ts\": \"2022-10-11T01:02:30.45567\"}}\n\ + { \"some-mod:some-tag\": {}}\n\ + ]\n\ + Default: \n\ +\n\ +-c - Use offline cache in receive mode\n\ +-o - O/p file to write received events\n\ + Default: STDOUT\n"; + + +bool term_receive = false; + +template <typename Map> +string +t_map_to_str(const Map &m) +{ + stringstream _ss; + string sep; + + _ss << "{"; + for (const auto elem: m) { + _ss << sep << "{" << elem.first << "," << elem.second << "}"; + if (sep.empty()) { + sep = ", "; + } + } + _ss << "}"; + return _ss.str(); +} + +void +do_receive(const event_subscribe_sources_t filter, const string outfile, int cnt, int pause, bool use_cache) +{ + int index=0, total_missed = 0; + ostream* fp = &cout; + ofstream fout; + + if (!outfile.empty()) { + fout.open(outfile); + if (!fout.fail()) { + fp = &fout; + printf("outfile=%s set\n", outfile.c_str()); + } + } + event_handle_t h = events_init_subscriber(use_cache, 2000, filter.empty() ? NULL : &filter); + printf("Subscribed with use_cache=%d timeout=2000 filter %s\n", + use_cache, filter.empty() ? "empty" : "non-empty"); + ASSERT(h != NULL, "Failed to get subscriber handle"); + + while(!term_receive) { + event_receive_op_t evt; + map_str_str_t evtOp; + + int rc = event_receive(h, evt); + if (rc != 0) { + ASSERT(rc == EAGAIN, "Failed to receive rc=%d index=%d\n", + rc, index); + continue; + } + ASSERT(!evt.key.empty(), "received EMPTY key"); + ASSERT(evt.missed_cnt >= 0, "Missed count uninitialized"); + ASSERT(evt.publish_epoch_ms > 0, "publish_epoch_ms uninitialized"); + + total_missed += evt.missed_cnt; + + evtOp[evt.key] = t_map_to_str(evt.params); + (*fp) << t_map_to_str(evtOp) << "\n"; + fp->flush(); + + if ((++index % PRINT_CHUNK_SZ) == 0) { + printf("Received index %d\n", index); + } + + if (cnt > 0) { + if (--cnt <= 0) { + break; + } + } + } + + events_deinit_subscriber(h); + printf("Total received = %d missed = %d file:%s\n", index, total_missed, + outfile.empty() ?
"STDOUT" : outfile.c_str()); +} + + +int +do_send(const string infile, int cnt, int pause) +{ + typedef struct { + string tag; + event_params_t params; + } evt_t; + + typedef vector lst_t; + + lst_t lst; + string source; + event_handle_t h; + int index = 0; + + if (!infile.empty()) { + ifstream input(infile); + + /* Read infile into list of events, that are ready for send */ + for( string line; getline( input, line ); ) + { + evt_t evt; + string str_params; + + const auto &data = nlohmann::json::parse(line); + ASSERT(data.is_object(), "Parsed data is not object"); + ASSERT((int)data.size() == 1, "string parse size = %d", (int)data.size()); + + string key(data.begin().key()); + if (source.empty()) { + source = key.substr(0, key.find(":")); + } else { + ASSERT(source == key.substr(0, key.find(":")), "source:%s read=%s", + source.c_str(), key.substr(0, key.find(":")).c_str()); + } + evt.tag = key.substr(key.find(":")+1); + + const auto &val = data.begin().value(); + ASSERT(val.is_object(), "Parsed params is not object"); + ASSERT((int)val.size() >= 1, "Expect non empty params"); + + for(auto par_it = val.begin(); par_it != val.end(); par_it++) { + evt.params[string(par_it.key())] = string(par_it.value()); + } + lst.push_back(evt); + } + } + + if (lst.empty()) { + evt_t evt = { + "test-tag", + { + { "param1", "foo"}, + {"param2", "bar"} + } + }; + lst.push_back(evt); + } + + h = events_init_publisher(source); + ASSERT(h != NULL, "failed to init publisher"); + + /* cnt = 0 as i/p implies forever */ + + while(cnt >= 0) { + /* Keep resending the list until count is exhausted */ + for(lst_t::const_iterator itc = lst.begin(); (cnt >= 0) && (itc != lst.end()); ++itc) { + const evt_t &evt = *itc; + + if ((++index % PRINT_CHUNK_SZ) == 0) { + printf("Sending index %d\n", index); + } + + int rc = event_publish(h, evt.tag, evt.params.empty() ? 
NULL : &evt.params); + ASSERT(rc == 0, "Failed to publish index=%d rc=%d", index, rc); + + if ((cnt > 0) && (--cnt == 0)) { + /* set to termninate */ + cnt = -1; + } + else if (pause) { + /* Pause between two sends */ + this_thread::sleep_for(chrono::milliseconds(pause)); + } + } + } + + events_deinit_publisher(h); + printf("Sent %d events\n", index); + return 0; +} + +void usage() +{ + printf("%s", s_usage); + exit(-1); +} + +int main(int argc, char **argv) +{ + bool use_cache = false; + int op = OP_INIT; + int cnt=0, pause=0; + string json_str_msg, outfile("STDOUT"), infile; + event_subscribe_sources_t filter; + + for(;;) + { + switch(getopt(argc, argv, "srn:p:i:o:f:c")) // note the colon (:) to indicate that 'b' has a parameter and is not a switch + { + case 'c': + use_cache = true; + continue; + + case 's': + op |= OP_SEND; + continue; + + case 'r': + op |= OP_RECV; + continue; + + case 'n': + cnt = stoi(optarg); + continue; + + case 'p': + pause = stoi(optarg); + continue; + + case 'i': + infile = optarg; + continue; + + case 'o': + outfile = optarg; + continue; + + case 'f': + { + stringstream ss(optarg); //create string stream from the string + while(ss.good()) { + string substr; + getline(ss, substr, ','); + filter.push_back(substr); + } + } + continue; + + case -1: + break; + + case '?': + case 'h': + default : + usage(); + break; + + } + break; + } + + + printf("op=%d n=%d pause=%d i=%s o=%s\n", + op, cnt, pause, infile.c_str(), outfile.c_str()); + + if (op == OP_SEND_RECV) { + thread thr(&do_receive, filter, outfile, 0, 0, use_cache); + do_send(infile, cnt, pause); + } + else if (op == OP_SEND) { + do_send(infile, cnt, pause); + } + else if (op == OP_RECV) { + do_receive(filter, outfile, cnt, pause, use_cache); + } + else { + ASSERT(false, "Elect -s for send or -r receive or both; Bailing out with no action\n"); + } + + printf("--------- END: Good run -----------------\n"); + return 0; +} + diff --git a/src/sonic-eventd/tools/events_volume_test.py b/src/sonic-eventd/tools/events_volume_test.py new file mode 100644 index 000000000000..73143d483cd8 --- /dev/null +++ b/src/sonic-eventd/tools/events_volume_test.py @@ -0,0 +1,68 @@ +import sys +import subprocess +import time +import logging +import argparse + +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s [%(levelname)s] %(message)s", + handlers = [ + logging.FileHandler("debug.log"), + logging.StreamHandler(sys.stdout) + ] +) + +def read_events_from_file(file, count): + logging.info("Reading from file generated by events_tool") + lines = 0 + with open(file, 'r') as infile: + lines = infile.readlines() + logging.info("Should receive {} events and got {} events\n".format(count, len(lines))) + assert len(lines) == count + +def start_tool(file): + logging.info("Starting events_tool\n") + proc = subprocess.Popen(["./events_tool", "-r", "-o", file]) + return proc + +def run_test(process, file, count, duplicate): + # log messages to see if events have been received + tool_proc = start_tool(file) + + time.sleep(2) # buffer for events_tool to startup + logging.info("Generating logger messages\n") + for i in range(count): + line = "" + state = "up" + if duplicate: + line = "{} test message testmessage state up".format(process) + else: + if i % 2 != 1: + state = "down" + line = "{} test message testmessage{} state {}".format(process, i, state) + command = "logger -p local0.notice -t {}".format(line) + subprocess.run(command, shell=True, stdout=subprocess.PIPE) + + time.sleep(2) # some buffer for all events to be published to 
file + read_events_from_file(file, count) + tool_proc.terminate() + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-p", "--process", nargs='?', const ='', default='', help="Process that is spitting out log") + parser.add_argument("-f", "--file", nargs='?', const='', default='', help="File used by events_tool to read events from") + parser.add_argument("-c", "--count", type=int, nargs='?', const=1000, default=1000, help="Count of times log message needs to be published down/up, default is 1000") + args = parser.parse_args() + if(args.process == '' or args.file == ''): + logging.error("Invalid process or logfile\n") + return + logging.info("Starting volume test\n") + logging.info("Generating {} unique messages for rsyslog plugin\n".format(args.count)) + run_test(args.process, args.file, args.count, False) + time.sleep(2) + logging.info("Restarting volume test but for duplicate log messages\n") + run_test(args.process, args.file, args.count, True) + +if __name__ == "__main__": + main() diff --git a/src/sonic-eventd/tools/sample_ip.json b/src/sonic-eventd/tools/sample_ip.json new file mode 100644 index 000000000000..acb8726cf253 --- /dev/null +++ b/src/sonic-eventd/tools/sample_ip.json @@ -0,0 +1 @@ +{"src_0:key-0": {"foo": "bar", "hello": "world" }} diff --git a/src/sonic-eventd/tools/subdir.mk b/src/sonic-eventd/tools/subdir.mk new file mode 100644 index 000000000000..5f13043dd612 --- /dev/null +++ b/src/sonic-eventd/tools/subdir.mk @@ -0,0 +1,12 @@ +CC := g++ + +TOOL_OBJS = ./tools/events_tool.o + +C_DEPS += ./tools/events_tool.d + +tools/%.o: tools/%.cpp + @echo 'Building file: $<' + @echo 'Invoking: GCC C++ Compiler' + $(CC) -D__FILENAME__="$(subst tools/,,$<)" $(CFLAGS) -c -fmessage-length=0 -MMD -MP -MF"$(@:%.o=%.d)" -MT"$(@)" -o "$@" "$<" + @echo 'Finished building: $<' + @echo ' ' From 78eeeb76708538b431b1fd4efdd4cc37452bf70c Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Sun, 4 Sep 2022 15:53:07 +0800 Subject: [PATCH 24/52] [SN2201] remove extra empty lines in the pg_profile_lookup.ini (#11923) - Why I did it Remove extra empty lines in the SN2201 pg_profile_lookup.ini to make it aligned with other platforms. This extra empty line could confuse some test cases which need to parse this file. Signed-off-by: Kebo Liu --- .../x86_64-nvidia_sn2201-r0/ACS-SN2201/pg_profile_lookup.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/device/mellanox/x86_64-nvidia_sn2201-r0/ACS-SN2201/pg_profile_lookup.ini b/device/mellanox/x86_64-nvidia_sn2201-r0/ACS-SN2201/pg_profile_lookup.ini index 53652080e967..18d2bf5ef8c5 100644 --- a/device/mellanox/x86_64-nvidia_sn2201-r0/ACS-SN2201/pg_profile_lookup.ini +++ b/device/mellanox/x86_64-nvidia_sn2201-r0/ACS-SN2201/pg_profile_lookup.ini @@ -14,7 +14,6 @@ ## See the License for the specific language governing permissions and ## limitations under the License. ## - # PG lossless profiles. 
# speed cable size xon xoff threshold 100 5m 49152 19456 29696 0 From 4e18510fc9f3b485628c8ce4f294fddbd9470689 Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Sun, 4 Sep 2022 10:54:16 +0300 Subject: [PATCH 25/52] [submodule] Advance sonic-utilities pointer (#11897) Update sonic-utilities submodule pointer to include the following: Replace cmp in acl_loader with operator.eq (#2328) Subinterface vrf bind issue fix (#2211) [VRF]Adding CLI checks to ensure Vrf is valid in interface bind and static route commands (#2333) [doc]: Add MACsec CLI doc (#2334) [sonic-package-manager] Drop 'expires_in' (#2002) Handle non-front-panel ports in is_rj45_port (#2327) [service_mgmt]: Fix fetch MULTI_INST_DEPENDENT bug in service_mgmt.sh.j2 (#2319) correct an error by changing "show bgp summary" to "show bfd summary" (#2324) Update VRF unbind command (#2331) Fix issue: port_type is referenced before initialized (#2323) Fix issue: exception in is_rj45_port in multi ASIC env (#2313) Delete .DS_Store (#2244) Fix bug with checking VRF's routes in route_check.py (#2301) [decode-syseeprom] Fix setting use_db based on support_eeprom_db (#2270) Fix vrf UT failed issue (#2309) add lacp_rate to portchannel (#2036) --- src/sonic-utilities | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-utilities b/src/sonic-utilities index 28b6ba5fc11f..3af8ba4acc2b 160000 --- a/src/sonic-utilities +++ b/src/sonic-utilities @@ -1 +1 @@ -Subproject commit 28b6ba5fc11f65abaf421c70159a605d233eda41 +Subproject commit 3af8ba4acc2bbc77d17be0d67943703021c7d1e1 From 1b5d07f665e705eb0ec9fd1af70dfa0dd9545b11 Mon Sep 17 00:00:00 2001 From: Dror Prital <76714716+dprital@users.noreply.github.com> Date: Sun, 4 Sep 2022 11:03:36 +0300 Subject: [PATCH 26/52] [submodule] Advance sonic-platform-daemons pointer (#11882) - Why I did it Update sonic-platform-daemons submodule pointer to include the following: [ycabled] enable telemetry for 'active-active'; fix gRPC portid ordering (#284) [ycabled] remove some spurious logs (#282) Correct the peer forwarding state table (#281) add psu input voltage and current (#276) [ycabled] add capability to enable/disable telemetry (#279) Signed-off-by: dprital --- src/sonic-platform-daemons | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-platform-daemons b/src/sonic-platform-daemons index cc563670a5e8..7c0a326e1dcd 160000 --- a/src/sonic-platform-daemons +++ b/src/sonic-platform-daemons @@ -1 +1 @@ -Subproject commit cc563670a5e8a10a51057a4e1455a1e36b386653 +Subproject commit 7c0a326e1dcd2be74a80f6d3a2d7c4af084c2035 From a8b2a538a56d9d0a19ddcccb2cbff7f5915058f7 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Tue, 6 Sep 2022 19:26:54 +0300 Subject: [PATCH 27/52] [docker-wait-any] immediately start to wait (#11595) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It could happen that a container has already crashed but docker-wait-any will wait forever till it starts. It should, however, immediately exit to make the service restart. #### Why I did it It is observed in some circumstances that the auto-restart mechanism does not work. Specifically for ```swss.service```, ```orchagent``` had crashed before ```docker-wait-any``` started in ```swss.sh```.
This led ```docker-wait-any``` to wait forever for ```swss``` to be in ```"Running"``` state and it results in: ``` CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 1abef1ecebff bcbca2b74df6 "/usr/local/bin/supe…" 22 hours ago Up 22 hours what-just-happened 3c924d405cd5 docker-lldp:latest "/usr/bin/docker-lld…" 22 hours ago Up 22 hours lldp eb2b12a98c13 docker-router-advertiser:latest "/usr/bin/docker-ini…" 22 hours ago Up 22 hours radv d6aac4a46974 docker-sonic-mgmt-framework:latest "/usr/local/bin/supe…" 22 hours ago Up 22 hours mgmt-framework d880fd07aab9 docker-platform-monitor:latest "/usr/bin/docker_ini…" 22 hours ago Up 22 hours pmon 75f9e22d4fdd docker-snmp:latest "/usr/local/bin/supe…" 22 hours ago Up 22 hours snmp 76d570a4bd1c docker-sonic-telemetry:latest "/usr/local/bin/supe…" 22 hours ago Up 22 hours telemetry ee49f50344b3 docker-syncd-mlnx:latest "/usr/local/bin/supe…" 22 hours ago Up 22 hours syncd 1f0b0bab3687 docker-teamd:latest "/usr/local/bin/supe…" 22 hours ago Up 22 hours teamd 917aeeaf9722 docker-orchagent:latest "/usr/bin/docker-ini…" 22 hours ago Exited (0) 22 hours ago swss 81a4d3e820e8 docker-fpm-frr:latest "/usr/bin/docker_ini…" 22 hours ago Up 22 hours bgp f6eee8be282c docker-database:latest "/usr/local/bin/dock…" 22 hours ago Up 22 hours database ``` The check for ```"Running"``` state is not needed because for cold boot case we do ```start_peer_and_dependent_services``` and for warm boot case the loop will retry to wait for container if this container is doing warm boot: https://github.com/stepanblyschak/sonic-buildimage/blob/d01a91a569c9d545b30e8f81994b02d0c2513971/files/image_config/misc/docker-wait-any#L56 #### How I did it Removed the check for ```"Running"```. #### How to verify it Kill swss before ```docker-wait-any``` is reached and verify auto restart will restart swss service. --- files/image_config/misc/docker-wait-any | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/files/image_config/misc/docker-wait-any b/files/image_config/misc/docker-wait-any index 4200bb43c87f..3a00a2c610d1 100755 --- a/files/image_config/misc/docker-wait-any +++ b/files/image_config/misc/docker-wait-any @@ -46,10 +46,9 @@ g_dep_services = [] def wait_for_container(docker_client, container_name): - while True: - while docker_client.inspect_container(container_name)['State']['Status'] != "running": - time.sleep(1) + log.log_info("Waiting on container '{}'".format(container_name)) + while True: docker_client.wait(container_name) log.log_info("No longer waiting on container '{}'".format(container_name)) From 31e750ee0b9b67fa49f6f2e73f4c18383ce2558a Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Tue, 6 Sep 2022 15:13:05 -0700 Subject: [PATCH 28/52] Fix PR build failure (#11973) Some PR builds fail to find this file.
Remove it temporarily until we root cause it --- dockers/docker-fpm-frr/Dockerfile.j2 | 6 +++--- files/build_templates/sonic_debian_extension.j2 | 4 ++-- rules/docker-eventd.mk | 3 ++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/dockers/docker-fpm-frr/Dockerfile.j2 b/dockers/docker-fpm-frr/Dockerfile.j2 index fd7ad0f08ed4..25e191bc338f 100644 --- a/dockers/docker-fpm-frr/Dockerfile.j2 +++ b/dockers/docker-fpm-frr/Dockerfile.j2 @@ -56,14 +56,14 @@ COPY ["TS", "/usr/bin/TS"] COPY ["files/supervisor-proc-exit-listener", "/usr/bin"] COPY ["zsocket.sh", "/usr/bin/"] COPY ["*.json", "/etc/rsyslog.d/"] -COPY ["files/rsyslog_plugin.conf.j2", "/etc/rsyslog.d/"] +# COPY ["files/rsyslog_plugin.conf.j2", "/etc/rsyslog.d/"] RUN chmod a+x /usr/bin/TSA && \ chmod a+x /usr/bin/TSB && \ chmod a+x /usr/bin/TSC && \ chmod a+x /usr/bin/zsocket.sh -RUN j2 -f json /etc/rsyslog.d/rsyslog_plugin.conf.j2 /etc/rsyslog.d/events_info.json > /etc/rsyslog.d/bgp_events.conf -RUN rm -f /etc/rsyslog.d/rsyslog_plugin.conf.j2* +# RUN j2 -f json /etc/rsyslog.d/rsyslog_plugin.conf.j2 /etc/rsyslog.d/events_info.json > /etc/rsyslog.d/bgp_events.conf +# RUN rm -f /etc/rsyslog.d/rsyslog_plugin.conf.j2* RUN rm -f /etc/rsyslog.d/events_info.json* ENTRYPOINT ["/usr/bin/docker_init.sh"] diff --git a/files/build_templates/sonic_debian_extension.j2 b/files/build_templates/sonic_debian_extension.j2 index 56b8290cc12e..43946c10a692 100644 --- a/files/build_templates/sonic_debian_extension.j2 +++ b/files/build_templates/sonic_debian_extension.j2 @@ -800,8 +800,8 @@ sudo bash -c "echo { > $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/ctr_image_name sudo bash -c "echo } >> $FILESYSTEM_ROOT_USR_SHARE_SONIC_TEMPLATES/ctr_image_names.json" # copy rsyslog plugin binary for use by all dockers that use plugin to publish events. -sudo mkdir -p ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS} -sudo cp ${files_path}/rsyslog_plugin ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS}/ +# sudo mkdir -p ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS} +# sudo cp ${files_path}/rsyslog_plugin ${FILESYSTEM_ROOT_USR_SHARE_SONIC_SCRIPTS}/ {% for script in installer_start_scripts.split(' ') -%} if [ -f $TARGET_MACHINE"_{{script}}" ]; then diff --git a/rules/docker-eventd.mk b/rules/docker-eventd.mk index c69fee09e569..ec333bf66048 100644 --- a/rules/docker-eventd.mk +++ b/rules/docker-eventd.mk @@ -43,5 +43,6 @@ $(DOCKER_EVENTD)_PLUGIN = rsyslog_plugin $($(DOCKER_EVENTD)_PLUGIN)_PATH = $($(DOCKER_EVENTD)_FILESPATH) SONIC_COPY_FILES += $($(DOCKER_EVENTD)_PLUGIN) -$(DOCKER_EVENTD)_SHARED_FILES = $($(DOCKER_EVENTD)_PLUGIN) +# Some builds fails to find this file. Remove until we root cause it. +# $(DOCKER_EVENTD)_SHARED_FILES = $($(DOCKER_EVENTD)_PLUGIN) From 5efd6f9748d6e87c5a279bb760442cf259eecd97 Mon Sep 17 00:00:00 2001 From: Ze Gan Date: Wed, 7 Sep 2022 08:16:23 +0800 Subject: [PATCH 29/52] [macsec]: Add MACsec clear CLI support (#11731) Why I did it To support clear MACsec counters by sonic-clear macsec How I did it Add macsec sub-command in sonic-clear to cache the current macsec stats, and in the show macsec command to check the cache and return the diff with cache file. 
How to verify it admin@vlab-02:~$ show macsec Ethernet0 MACsec port(Ethernet0) --------------------- ----------- cipher_suite GCM-AES-128 enable true enable_encrypt true enable_protect true enable_replay_protect false replay_window 0 send_sci true --------------------- ----------- MACsec Egress SC (52540067daa70001) ----------- - encoding_an 0 ----------- - MACsec Egress SA (0) ------------------------------------- -------------------------------- auth_key 9DDD4C69220A1FA9B6763F229B75CB6F next_pn 1 sak BA86574D054FCF48B9CD7CF54F21304A salt 000000000000000000000000 ssci 0 SAI_MACSEC_SA_ATTR_CURRENT_XPN 52 SAI_MACSEC_SA_STAT_OCTETS_ENCRYPTED 0 SAI_MACSEC_SA_STAT_OCTETS_PROTECTED 0 SAI_MACSEC_SA_STAT_OUT_PKTS_ENCRYPTED 0 SAI_MACSEC_SA_STAT_OUT_PKTS_PROTECTED 0 ------------------------------------- -------------------------------- MACsec Ingress SC (525400d4fd3f0001) MACsec Ingress SA (0) --------------------------------------- -------------------------------- active true auth_key 9DDD4C69220A1FA9B6763F229B75CB6F lowest_acceptable_pn 1 sak BA86574D054FCF48B9CD7CF54F21304A salt 000000000000000000000000 ssci 0 SAI_MACSEC_SA_ATTR_CURRENT_XPN 56 SAI_MACSEC_SA_STAT_IN_PKTS_DELAYED 0 SAI_MACSEC_SA_STAT_IN_PKTS_INVALID 0 SAI_MACSEC_SA_STAT_IN_PKTS_LATE 0 SAI_MACSEC_SA_STAT_IN_PKTS_NOT_USING_SA 0 SAI_MACSEC_SA_STAT_IN_PKTS_NOT_VALID 0 SAI_MACSEC_SA_STAT_IN_PKTS_OK 0 SAI_MACSEC_SA_STAT_IN_PKTS_UNCHECKED 0 SAI_MACSEC_SA_STAT_IN_PKTS_UNUSED_SA 0 SAI_MACSEC_SA_STAT_OCTETS_ENCRYPTED 0 SAI_MACSEC_SA_STAT_OCTETS_PROTECTED 0 --------------------------------------- -------------------------------- admin@vlab-02:~$ sonic-clear macsec Clear MACsec counters admin@vlab-02:~$ show macsec Ethernet0 MACsec port(Ethernet0) --------------------- ----------- cipher_suite GCM-AES-128 enable true enable_encrypt true enable_protect true enable_replay_protect false replay_window 0 send_sci true --------------------- ----------- MACsec Egress SC (52540067daa70001) ----------- - encoding_an 0 ----------- - MACsec Egress SA (0) ------------------------------------- -------------------------------- auth_key 9DDD4C69220A1FA9B6763F229B75CB6F next_pn 1 sak BA86574D054FCF48B9CD7CF54F21304A salt 000000000000000000000000 ssci 0 SAI_MACSEC_SA_ATTR_CURRENT_XPN 52 SAI_MACSEC_SA_STAT_OCTETS_ENCRYPTED 0 SAI_MACSEC_SA_STAT_OCTETS_PROTECTED 0 SAI_MACSEC_SA_STAT_OUT_PKTS_ENCRYPTED 0 SAI_MACSEC_SA_STAT_OUT_PKTS_PROTECTED 0 ------------------------------------- -------------------------------- MACsec Ingress SC (525400d4fd3f0001) MACsec Ingress SA (0) --------------------------------------- -------------------------------- active true auth_key 9DDD4C69220A1FA9B6763F229B75CB6F lowest_acceptable_pn 1 sak BA86574D054FCF48B9CD7CF54F21304A salt 000000000000000000000000 ssci 0 SAI_MACSEC_SA_ATTR_CURRENT_XPN 0 <---this counters was cleared. 
SAI_MACSEC_SA_STAT_IN_PKTS_DELAYED 0 SAI_MACSEC_SA_STAT_IN_PKTS_INVALID 0 SAI_MACSEC_SA_STAT_IN_PKTS_LATE 0 SAI_MACSEC_SA_STAT_IN_PKTS_NOT_USING_SA 0 SAI_MACSEC_SA_STAT_IN_PKTS_NOT_VALID 0 SAI_MACSEC_SA_STAT_IN_PKTS_OK 0 SAI_MACSEC_SA_STAT_IN_PKTS_UNCHECKED 0 SAI_MACSEC_SA_STAT_IN_PKTS_UNUSED_SA 0 SAI_MACSEC_SA_STAT_OCTETS_ENCRYPTED 0 SAI_MACSEC_SA_STAT_OCTETS_PROTECTED 0 --------------------------------------- -------------------------------- Signed-off-by: Ze Gan Co-authored-by: Judy Joseph --- .../cli/clear/plugins/clear_macsec_counter.py | 36 ++++++++++ .../cli/show/plugins/show_macsec.py | 72 +++++++++++++++---- rules/docker-macsec.mk | 1 + 3 files changed, 94 insertions(+), 15 deletions(-) create mode 100644 dockers/docker-macsec/cli/clear/plugins/clear_macsec_counter.py diff --git a/dockers/docker-macsec/cli/clear/plugins/clear_macsec_counter.py b/dockers/docker-macsec/cli/clear/plugins/clear_macsec_counter.py new file mode 100644 index 000000000000..b47a576f9ed2 --- /dev/null +++ b/dockers/docker-macsec/cli/clear/plugins/clear_macsec_counter.py @@ -0,0 +1,36 @@ +import os +import click + +import show.plugins.macsec as show_macsec +import utilities_common.cli as clicommon +from sonic_py_common import multi_asic + +@click.group(cls=clicommon.AliasedGroup) +def macsec(): + pass + + +@macsec.command('macsec') +@click.option('--clean-cache', type=bool, required=False, default=False, help="If the clean cache option is true, the next show commands will show the raw counters since the service booted instead of counters relative to the last clear command.") +def macsec_clear_counters(clean_cache): + """ + Clear MACsec counters. + This clear command generates a cache for subsequent show commands, which use it as the zero baseline and show the increment of counters.
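+ The cache is written per namespace under the user cache directory + (see CACHE_FILE in show_macsec) and can be removed with --clean-cache.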
+ """ + + if clean_cache: + for namespace in multi_asic.get_namespace_list(): + if os.path.isfile(show_macsec.CACHE_FILE.format(namespace)): + os.remove(show_macsec.CACHE_FILE.format(namespace)) + print("Cleaned cache") + return + + clicommon.run_command("show macsec --dump-file") + print("Clear MACsec counters") + +def register(cli): + cli.add_command(macsec_clear_counters) + + +if __name__ == '__main__': + macsec_clear_counters(None) diff --git a/dockers/docker-macsec/cli/show/plugins/show_macsec.py b/dockers/docker-macsec/cli/show/plugins/show_macsec.py index 3f1058df3572..9789b32bac6b 100644 --- a/dockers/docker-macsec/cli/show/plugins/show_macsec.py +++ b/dockers/docker-macsec/cli/show/plugins/show_macsec.py @@ -1,12 +1,19 @@ import typing from natsort import natsorted +import datetime +import pickle +import os +import copy import click from tabulate import tabulate import utilities_common.multi_asic as multi_asic_util from swsscommon.swsscommon import CounterTable, MacsecCounter +from utilities_common.cli import UserCache +CACHE_MANAGER = UserCache(app_name="macsec") +CACHE_FILE = os.path.join(CACHE_MANAGER.get_directory(), "macsecstats{}") DB_CONNECTOR = None COUNTER_TABLE = None @@ -15,12 +22,12 @@ class MACsecAppMeta(object): def __init__(self, *args) -> None: SEPARATOR = DB_CONNECTOR.get_db_separator(DB_CONNECTOR.APPL_DB) - key = self.__class__.get_appl_table_name() + SEPARATOR + \ + self.key = self.__class__.get_appl_table_name() + SEPARATOR + \ SEPARATOR.join(args) self.meta = DB_CONNECTOR.get_all( - DB_CONNECTOR.APPL_DB, key) + DB_CONNECTOR.APPL_DB, self.key) if len(self.meta) == 0: - raise ValueError("No such MACsecAppMeta: {}".format(key)) + raise ValueError("No such MACsecAppMeta: {}".format(self.key)) for k, v in self.meta.items(): setattr(self, k, v) @@ -39,10 +46,15 @@ def __init__(self, port_name: str, sci: str, an: str) -> None: MACsecAppMeta.__init__(self, port_name, sci, an) MACsecCounters.__init__(self, port_name, sci, an) - def dump_str(self) -> str: + def dump_str(self, cache = None) -> str: buffer = self.get_header() meta = sorted(self.meta.items(), key=lambda x: x[0]) - counters = sorted(self.counters.items(), key=lambda x: x[0]) + counters = copy.deepcopy(self.counters) + if cache: + for k, v in counters.items(): + if k in cache.counters: + counters[k] = int(counters[k]) - int(cache.counters[k]) + counters = sorted(counters.items(), key=lambda x: x[0]) buffer += tabulate(meta + counters) buffer = "\n".join(["\t\t" + line for line in buffer.splitlines()]) return buffer @@ -87,7 +99,7 @@ def __init__(self, port_name: str, sci: str) -> None: def get_appl_table_name(cls) -> str: return "MACSEC_INGRESS_SC_TABLE" - def dump_str(self) -> str: + def dump_str(self, cache = None) -> str: buffer = self.get_header() buffer = "\n".join(["\t" + line for line in buffer.splitlines()]) return buffer @@ -104,7 +116,7 @@ def __init__(self, port_name: str, sci: str) -> None: def get_appl_table_name(cls) -> str: return "MACSEC_EGRESS_SC_TABLE" - def dump_str(self) -> str: + def dump_str(self, cache = None) -> str: buffer = self.get_header() buffer += tabulate(sorted(self.meta.items(), key=lambda x: x[0])) buffer = "\n".join(["\t" + line for line in buffer.splitlines()]) @@ -123,7 +135,7 @@ def __init__(self, port_name: str) -> None: def get_appl_table_name(cls) -> str: return "MACSEC_PORT_TABLE" - def dump_str(self) -> str: + def dump_str(self, cache = None) -> str: buffer = self.get_header() buffer += tabulate(sorted(self.meta.items(), key=lambda x: x[0])) return buffer @@ -149,6 
+161,7 @@ def create_macsec_obj(key: str) -> MACsecAppMeta: except ValueError as e: return None + def create_macsec_objs(interface_name: str) -> typing.List[MACsecAppMeta]: objs = [] objs.append(create_macsec_obj(MACsecPort.get_appl_table_name() + ":" + interface_name)) @@ -179,12 +192,25 @@ def create_macsec_objs(interface_name: str) -> typing.List[MACsecAppMeta]: return objs +def cache_find(cache: dict, target: MACsecAppMeta) -> MACsecAppMeta: + if not cache or not cache["objs"]: + return None + for obj in cache["objs"]: + if type(obj) == type(target) and obj.key == target.key: + # MACsec SA may be refreshed by a cycle that use the same key + # So, use the SA as the identifier + if isinstance(obj, MACsecSA) and obj.sak != target.sak: + continue + return obj + return None + + @click.command() @click.argument('interface_name', required=False) +@click.option('--dump-file', is_flag=True, required=False, default=False) @multi_asic_util.multi_asic_click_options -def macsec(interface_name, namespace, display): - MacsecContext(namespace, display).show(interface_name) - +def macsec(interface_name, dump_file, namespace, display): + MacsecContext(namespace, display).show(interface_name, dump_file) class MacsecContext(object): @@ -194,7 +220,7 @@ def __init__(self, namespace_option, display_option): display_option, namespace_option) @multi_asic_util.run_on_multi_asic - def show(self, interface_name): + def show(self, interface_name, dump_file): global DB_CONNECTOR global COUNTER_TABLE DB_CONNECTOR = self.db @@ -205,13 +231,29 @@ def show(self, interface_name): if interface_name not in interface_names: return interface_names = [interface_name] - objs = [] + for interface_name in natsorted(interface_names): objs += create_macsec_objs(interface_name) - for obj in objs: - print(obj.dump_str()) + cache = {} + if os.path.isfile(CACHE_FILE.format(self.multi_asic.current_namespace)): + cache = pickle.load(open(CACHE_FILE.format(self.multi_asic.current_namespace), "rb")) + + if not dump_file: + if cache and cache["time"] and objs: + print("Last cached time was {}".format(cache["time"])) + for obj in objs: + cache_obj = cache_find(cache, obj) + print(obj.dump_str(cache_obj)) + else: + dump_obj = { + "time": datetime.datetime.now(), + "objs": objs + } + with open(CACHE_FILE.format(self.multi_asic.current_namespace), 'wb') as dump_file: + pickle.dump(dump_obj, dump_file) + dump_file.flush() def register(cli): cli.add_command(macsec) diff --git a/rules/docker-macsec.mk b/rules/docker-macsec.mk index 3a6e9a558577..5db5ea5a41d9 100644 --- a/rules/docker-macsec.mk +++ b/rules/docker-macsec.mk @@ -44,5 +44,6 @@ $(DOCKER_MACSEC)_RUN_OPT += -v /host/warmboot:/var/warmboot $(DOCKER_MACSEC)_CLI_CONFIG_PLUGIN = /cli/config/plugins/macsec.py $(DOCKER_MACSEC)_CLI_SHOW_PLUGIN = /cli/show/plugins/show_macsec.py +$(DOCKER_MACSEC)_CLI_CLEAR_PLUGIN = /cli/clear/plugins/clear_macsec_counter.py $(DOCKER_MACSEC)_FILES += $(SUPERVISOR_PROC_EXIT_LISTENER_SCRIPT) From 3b9bbf7d2827cbb489e20b1af28ab48a75b46572 Mon Sep 17 00:00:00 2001 From: Muhammad Danish <88161975+mdanish-kh@users.noreply.github.com> Date: Wed, 7 Sep 2022 09:25:44 +0500 Subject: [PATCH 30/52] [doc]: Update README.md (#11960) * Remove faulty pipeline URLs * Miscellaneous minor fixes --- README.md | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 92684138845d..1f78c9f56d60 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,6 @@ 
[![Mellanox](https://dev.azure.com/mssonic/build/_apis/build/status/mellanox/Azure.sonic-buildimage.official.mellanox?branchName=master&label=Mellanox)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=139&branchName=master) [![Marvell(armhf)](https://dev.azure.com/mssonic/build/_apis/build/status/marvell/Azure.sonic-buildimage.official.marvell-armhf?branchName=master&label=Marvell-armhf)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=141&branchName=master) [![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=master&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=master) -[![P4](https://sonic-jenkins.westus2.cloudapp.azure.com/job/p4/job/buildimage-p4-all/badge/icon?subject=P4)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/p4/job/buildimage-p4-all) [![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=master&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=master) *202205 builds*: @@ -70,25 +69,13 @@ [![Nephos](https://dev.azure.com/mssonic/build/_apis/build/status/nephos/Azure.sonic-buildimage.official.nephos?branchName=201811&label=Nephos)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=149&branchName=201811) [![VS](https://dev.azure.com/mssonic/build/_apis/build/status/vs/Azure.sonic-buildimage.official.vs?branchName=201811&label=VS)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=142&branchName=201811) -*201807 builds*: - -[![Broadcom](https://sonic-jenkins.westus2.cloudapp.azure.com/job/broadcom/job/buildimage-brcm-201807/badge/icon?subject=Broadcom)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/broadcom/job/buildimage-brcm-201807/) -[![Barefoot](https://sonic-jenkins.westus2.cloudapp.azure.com/job/barefoot/job/buildimage-bf-201807/badge/icon?subject=Barefoot)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/barefoot/job/buildimage-bf-201807/) - -*201803 builds*: - -[![Broadcom](https://sonic-jenkins.westus2.cloudapp.azure.com/job/broadcom/job/buildimage-brcm-201803/badge/icon?subject=Broadcom)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/broadcom/job/buildimage-brcm-201803/) -[![Nephos](https://sonic-jenkins.westus2.cloudapp.azure.com/job/nephos/job/buildimage-nephos-201803/badge/icon?subject=Nephos)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/nephos/job/buildimage-nephos-201803/) -[![Marvell](https://sonic-jenkins.westus2.cloudapp.azure.com/job/marvell/job/buildimage-mrvl-201803/badge/icon?subject=Marvell)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/marvell/job/buildimage-mrvl-201803/) -[![Mellanox](https://sonic-jenkins.westus2.cloudapp.azure.com/job/mellanox/job/buildimage-mlnx-201803/badge/icon?subject=Mellanox)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/mellanox/job/buildimage-mlnx-201803/) - # sonic-buildimage ## Build SONiC Switch Images # Description -Following is the instruction on how to build an [(ONIE)](https://github.com/opencomputeproject/onie) compatible network operating system (NOS) installer image for network switches, and also how to build docker images running inside the NOS. Note that SONiC image are build per ASIC platform. Switches using the same ASIC platform share a common image. 
For a list of supported switches and ASIC, please refer to this [list](https://github.com/sonic-net/SONiC/wiki/Supported-Devices-and-Platforms) +Following are the instructions on how to build an [(ONIE)](https://github.com/opencomputeproject/onie) compatible network operating system (NOS) installer image for network switches, and also how to build docker images running inside the NOS. Note that SONiC images are build per ASIC platform. Switches using the same ASIC platform share a common image. For a list of supported switches and ASIC, please refer to this [list](https://github.com/sonic-net/SONiC/wiki/Supported-Devices-and-Platforms) # Hardware @@ -156,12 +143,14 @@ To build SONiC installer image and docker images, run the following commands: - PLATFORM=vs ## Usage for ARM Architecture -To build Arm32 bit for (ARMHF) platform - ARM build has dependency in docker version 18, - if docker version is 19, downgrade to 18 as below - sudo apt-get install --allow-downgrades -y docker-ce=5:18.09.0~3-0~ubuntu-xenial - sudo apt-get install --allow-downgrades -y docker-ce-cli=5:18.09.0~3-0~ubuntu-xenial +ARM build has dependency in docker version 18. If docker version is 19, downgrade to 18 with: +``` +sudo apt-get install --allow-downgrades -y docker-ce=5:18.09.0~3-0~ubuntu-xenial +sudo apt-get install --allow-downgrades -y docker-ce-cli=5:18.09.0~3-0~ubuntu-xenial +``` +To build Arm32 bit for (ARMHF) platform + # Execute make configure once to configure ASIC and ARCH make configure PLATFORM=[ASIC_VENDOR] PLATFORM_ARCH=armhf @@ -174,7 +163,7 @@ To build Arm32 bit for (ARMHF) platform make target/sonic-marvell-armhf.bin -To build Arm32 bit for (ARMHF) Marvell platform on amd64 host for debian buster using cross-compilation run the following commands: +To build Arm32 bit for (ARMHF) Marvell platform on amd64 host for debian buster using cross-compilation, run the following commands: # Execute make configure once to configure ASIC and ARCH for cross-compilation build @@ -239,7 +228,7 @@ Every target has a clean target, so in order to clean swss, execute: It is recommended to use clean targets to clean all packages that are built together, like dev packages for instance. In order to be more familiar with build process and make some changes to it, it is recommended to read this short [Documentation](README.buildsystem.md). ## Build debug dockers and debug SONiC installer image: -SONiC build system supports building dockers and ONIE-image with debug tools and debug symbols, to help with live & core debugging. For details refer to [(SONiC Buildimage Guide)](https://github.com/sonic-net/sonic-buildimage/blob/master/README.buildsystem.md). +SONiC build system supports building dockers and ONIE-image with debug tools and debug symbols, to help with live & core debugging. For details refer to [SONiC Buildimage Guide](https://github.com/sonic-net/sonic-buildimage/blob/master/README.buildsystem.md). ## SAI Version Please refer to [SONiC roadmap](https://github.com/sonic-net/SONiC/wiki/Sonic-Roadmap-Planning) on the SAI version for each SONiC release. 
From 38cc35f6da630b068048af02bf141e85d444dee8 Mon Sep 17 00:00:00 2001 From: UmaMaven <106967235+UmaMaven@users.noreply.github.com> Date: Wed, 7 Sep 2022 10:25:59 +0530 Subject: [PATCH 31/52] support for static-route yang model (#11932) *[Yang] support for static-route yang model #11932 --- src/sonic-yang-models/setup.py | 2 + .../tests/files/sample_config_db.json | 9 + .../yang_model_tests/tests/static_route.json | 57 + .../tests_config/static-route.json | 1028 +++++++++++++++++ .../yang-models/sonic-static-route.yang | 124 ++ 5 files changed, 1220 insertions(+) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/static_route.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/static-route.json create mode 100644 src/sonic-yang-models/yang-models/sonic-static-route.yang diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 975b84b68245..961017fc7ca0 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -158,6 +158,7 @@ def run(self): './yang-models/sonic-pfc-priority-queue-map.yang', './yang-models/sonic-pfc-priority-priority-group-map.yang', './yang-models/sonic-port-qos-map.yang', + './yang-models/sonic-static-route.yang', './yang-models/sonic-macsec.yang']), ('cvlyang-models', ['./cvlyang-models/sonic-acl.yang', './cvlyang-models/sonic-bgp-common.yang', @@ -214,6 +215,7 @@ def run(self): './cvlyang-models/sonic-pfc-priority-queue-map.yang', './cvlyang-models/sonic-pfc-priority-priority-group-map.yang', './cvlyang-models/sonic-port-qos-map.yang', + './cvlyang-models/sonic-static-route.yang', './cvlyang-models/sonic-macsec.yang']), ], zip_safe=False, diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index b833fab06ddd..ddf83de7f0a1 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -1997,6 +1997,15 @@ "console_mgmt": { "enabled": "yes" } + }, + "STATIC_ROUTE": { + "default|20.20.20.0/24": { + "blackhole": "false", + "distance": "1", + "ifname": "Ethernet14", + "nexthop": "10.184.229.212", + "nexthop-vrf": "default" + } } }, "SAMPLE_CONFIG_DB_UNKNOWN": { diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/static_route.json b/src/sonic-yang-models/tests/yang_model_tests/tests/static_route.json new file mode 100644 index 000000000000..a41a94a7fc9c --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/static_route.json @@ -0,0 +1,57 @@ +{ + "STATIC_ROUTE_TEST": { + "desc": "Configure basic static route with default VRFs with PREFIX" + }, + "STATIC_ROUTE_TEST_WITH_INTERFACE": { + "desc": "Configure with nexthop as interface instead of IP address" + }, + "STATIC_ROUTE_TEST_WITH_BLACKHOLE": { + "desc": "Configure with nexthop as blackhole" + }, + "STATIC_ROUTE_TEST_WITH_VRF": { + "desc": "Configure with routes in non default VRF" + }, + "STATIC_ROUTE_TEST_WITH_KEY_VRF_MGMT": { + "desc": "Configure with routes in mgmt VRF as key" + }, + "STATIC_ROUTE_TEST_WITH_VRF_MGMT": { + "desc": "Configure with routes with mgmt VRF as next hop" + }, + "STATIC_ROUTE_TEST_WITH_VRF_LEAK": { + "desc": "Configure with route leak across VRFS" + }, + "STATIC_ROUTE_TEST_ECMP": { + "desc": "Configure comma separated values for ECMP, with nexthop as PREFIX" + }, + "STATIC_ROUTE_TEST_ECMP_WITH_INTERFACE": { + "desc": "Configure comma separated values with nexthop as INTERFACE" + }, + "STATIC_ROUTE_TEST_ECMP_WITH_MGMT": { + 
"desc": "Configure comma separated values with one nexthop as mgmt" + }, + "STATIC_ROUTE_TEST_DISTANCE_INVALID": { + "desc": "Configure with invalid distance number", + "eStrKey": "Pattern" + }, + "STATIC_ROUTE_TEST_BLACKHOLE_INVALID": { + "desc": "Configure with invalid value for blackhole", + "eStrKey": "Pattern" + }, + "STATIC_ROUTE_TEST_NEXTHOP_VRF_INVALID": { + "desc": "Configure with invalid value for VRF", + "eStrKey": "Pattern" + }, + "STATIC_ROUTE_TEST_ECMP_DISTANCE_INVALID": { + "desc": "Configure with invalid distance for ECMP", + "eStrKey": "Pattern" + }, + "STATIC_ROUTE_TEST_ECMP_BLACKHOLE_INVALID": { + "desc": "Configure with invalid blackhop for ECMP", + "eStrKey": "Pattern" + }, + "STATIC_ROUTE_TEST_ECMP_NEXTHOP_VRF_INVALID": { + "desc": "Configure with invalid vrf for ECMP", + "eStrKey": "Pattern" + } + +} diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/static-route.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/static-route.json new file mode 100644 index 000000000000..3afdc2e8d25c --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/static-route.json @@ -0,0 +1,1028 @@ +{ + "STATIC_ROUTE_TEST": { + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "10.0.0.1/24", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "vrf_name": "default", + "prefix":"100.100.100.1/24", + "nexthop":"10.10.10.1", + "distance": "1", + "nexthop-vrf":"default", + "blackhole":"false" + }] + } + } + }, + "STATIC_ROUTE_TEST_WITH_INTERFACE": { + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "10.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "vrf_name": "default", + "prefix":"100.100.100.1/24", + "nexthop":"0.0.0.0", + "ifname":"Ethernet8", + "distance": "1", + "nexthop-vrf":"default", + "blackhole":"false" + }] + } + } + }, + "STATIC_ROUTE_TEST_WITH_BLACKHOLE": { + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "10.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": 
"150.150.150.0/24", + "vrf_name": "default", + "ifname": "Ethernet8", + "distance": "1", + "nexthop-vrf": "default", + "blackhole": "true" + }] + } + } + }, + "STATIC_ROUTE_TEST_WITH_VRF": { + "sonic-vrf:sonic-vrf": { + "VRF": { + "VRF_LIST": [ + { + "name": "VrfMav", + "fallback": true + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "75.75.75.0/24", + "vrf_name": "VrfMav", + "nexthop": "1.1.1.2", + "ifname": "Ethernet8", + "distance": "1", + "nexthop-vrf": "VrfMav", + "blackhole": "false" + }] + } + } + }, + "STATIC_ROUTE_TEST_WITH_KEY_VRF_MGMT": { + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "75.75.75.0/24", + "vrf_name": "mgmt", + "nexthop": "1.1.1.2", + "ifname": "Ethernet8", + "distance": "1", + "nexthop-vrf": "mgmt", + "blackhole": "false" + }] + } + } + }, + "STATIC_ROUTE_TEST_WITH_VRF_MGMT": { + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "75.75.75.0/24", + "vrf_name": "VrfMav", + "nexthop": "1.1.1.2", + "ifname": "Ethernet8", + "distance": "1", + "nexthop-vrf": "mgmt", + "blackhole": "false" + }] + } + } + }, + "STATIC_ROUTE_TEST_WITH_VRF_LEAK": { + "sonic-vrf:sonic-vrf": { + "VRF": { + "VRF_LIST": [ + { + "name": "VrfAbc", + "fallback": true + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + 
"sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "85.85.85.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.2", + "ifname": "Ethernet8", + "distance": "1", + "nexthop-vrf": "VrfAbc", + "blackhole": "false" + }] + } + } + }, + "STATIC_ROUTE_TEST_ECMP": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "2.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "15.15.15.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.2,2.0.0.2", + "distance": "1,1", + "nexthop-vrf": "default,default", + "blackhole": "false,false" + }] + } + } + }, + "STATIC_ROUTE_TEST_ECMP_WITH_MGMT": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "2.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "15.15.15.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.2,2.0.0.2", + "distance": "1,1", + "nexthop-vrf": "default,mgmt", + "blackhole": "false,false" + }] + } + } + }, + "STATIC_ROUTE_TEST_ECMP_WITH_INTERFACE": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } 
+ ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "2.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "0.0.0.0,0.0.0.0", + "ifname": "Ethernet0,Ethernet8", + "distance": "1,1", + "nexthop-vrf": "default,default", + "blackhole": "false,false" + }] + } + } + }, + "STATIC_ROUTE_TEST_DISTANCE_INVALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.4", + "distance": "300", + "nexthop-vrf": "default", + "blackhole": "false" + }] + } + } + }, + "STATIC_ROUTE_TEST_BLACKHOLE_INVALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.5", + "distance": "1", + "nexthop-vrf": "default", + "blackhole": "down" + }] + } + } + }, + "STATIC_ROUTE_TEST_NEXTHOP_VRF_INVALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "1.0.05", + "distance": "1", + "nexthop-vrf": "vrf123", + "blackhole": "false" + }] + } + } + }, + "STATIC_ROUTE_TEST_NEXTHOP_VRF_INVALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + 
"PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "1.0.05", + "distance": "1", + "nexthop-vrf": "vrf123", + "blackhole": "false" + }] + } + } + }, + "STATIC_ROUTE_TEST_ECMP_DISTANCE_INVALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "2.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.3,2.0.0.8", + "distance": "1000,1", + "nexthop-vrf": "default,default", + "blackhole": "false,false" + }] + } + } + }, + "STATIC_ROUTE_TEST_ECMP_BLACKHOLE_INVALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "2.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.4,2.0.0.3", + "distance": "1,1", + "nexthop-vrf": "default,default", + "blackhole": "no,no" + }] + } + } + }, + 
"STATIC_ROUTE_TEST_ECMP_NEXTHOP_VRF_INVALID": { + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth0", + "description": "Ethernet0", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet0", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "1.0.0.1/30", + "name": "Ethernet0", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet0" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": 9000, + "name": "Ethernet8", + "speed": 25000 + } + ] + } + }, + "sonic-interface:sonic-interface": { + "sonic-interface:INTERFACE": { + "INTERFACE_IPPREFIX_LIST": [ + { + "family": "IPv4", + "ip-prefix": "2.0.0.1/30", + "name": "Ethernet8", + "scope": "global" + } + ], + "INTERFACE_LIST": [ + { + "name": "Ethernet8" + } + ] + } + }, + "sonic-static-route:sonic-static-route": { + "sonic-static-route:STATIC_ROUTE": { + "STATIC_ROUTE_LIST": [{ + "prefix": "16.16.16.0/24", + "vrf_name": "default", + "nexthop": "1.0.0.4,2.0.0.3", + "distance": "1,1", + "nexthop-vrf": "vrf123,xyz", + "blackhole": "false,false" + }] + } + } + } + +} + + diff --git a/src/sonic-yang-models/yang-models/sonic-static-route.yang b/src/sonic-yang-models/yang-models/sonic-static-route.yang new file mode 100644 index 000000000000..48a562f242d9 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-static-route.yang @@ -0,0 +1,124 @@ +module sonic-static-route { + yang-version 1.1; + namespace "http://github.com/Azure/sonic-static-route"; + prefix sroute; + + import sonic-vrf { + prefix vrf; + } + import ietf-inet-types { + prefix inet; + } + import sonic-types { + prefix stypes; + } + + organization + "SONiC"; + contact + "SONiC"; + description + "STATIC ROUTE yang Module for SONiC OS"; + + revision 2022-03-17 { + description + "First Revision"; + } + + container sonic-static-route { + container STATIC_ROUTE { + description + "STATIC_ROUTE part of config_db.json"; + list STATIC_ROUTE_TEMPLATE_LIST { + key "prefix"; + leaf prefix { + type inet:ip-prefix; + description + "prefix is the destination IP address, as key"; + } + leaf nexthop { + type string; + description + "The next-hop that is to be used for the + static route as IP address. When interface needs to be + specified, use 0.0.0.0 as leaf value"; + } + leaf ifname { + type string; + description + "When interface is specified, forwarding happens through it"; + } + leaf advertise { + type string { + pattern "((true|false),)*(true|false)"; + } + default "false"; + } + } + list STATIC_ROUTE_LIST { + key "vrf_name prefix"; + leaf vrf_name { + type union { + type string { + pattern 'default'; + } + type string { + pattern 'mgmt'; + } + type string { + pattern "Vrf[a-zA-Z0-9_-]+"; + } + } + description + "Virtual Routing Instance name as key"; + } + leaf prefix { + type inet:ip-prefix; + description + "prefix is the destination IP address, as key"; + } + leaf nexthop { + type string; + description + "The next-hop that is to be used for the + static route as IP address. 
When interface needs to be
+                        specified, use 0.0.0.0 as leaf value";
+                }
+                leaf ifname {
+                    type string;
+                    description
+                        "When interface is specified, forwarding happens through it";
+                }
+                leaf distance {
+                    type string {
+                        pattern "((25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?),)*(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)";
+                    }
+                    default "0";
+                    description
+                        "Administrative Distance (preference) of the entry. The
+                        preference defines the order of selection when multiple
+                        sources (protocols, static, etc.) contribute to the same
+                        prefix entry. The lower the preference, the more preferable the
+                        prefix is. When this value is not specified, the preference is
+                        inherited from the default preference of the implementation for
+                        static routes.";
+                }
+                leaf nexthop-vrf {
+                    type string {
+                        pattern "(((Vrf[a-zA-Z0-9_-]+)|(default)|(mgmt)),)*((Vrf[a-zA-Z0-9_-]+)|(default)|(mgmt))";
+                    }
+                    description
+                        "VRF name of the nexthop. This is for VRF leaking";
+                }
+                leaf blackhole {
+                    type string {
+                        pattern "((true|false),)*(true|false)";
+                    }
+                    default "false";
+                    description
+                        "blackhole refers to a route that, if matched, discards the message silently.";
+                }
+            } /* end of list STATIC_ROUTE_LIST */
+        } /* end of container STATIC_ROUTE */
+    } /* end of container sonic-static-route */
+}
From 98d6357ae70d903c04e1ef883887a5e15d7be348 Mon Sep 17 00:00:00 2001
From: Liu Shilong
Date: Thu, 8 Sep 2022 17:23:29 +0800
Subject: [PATCH 32/52] [actions] Remove approve step in label action. (#12015)

Why I did it
The approve step needs special permission settings. We already added a
permission setting to enable bypass merging of PRs, so the approve step is
not necessary.
---
 .github/workflows/label.yml | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml
index ec04157110ae..5f8c0279b7e1 100644
--- a/.github/workflows/label.yml
+++ b/.github/workflows/label.yml
@@ -22,15 +22,6 @@ jobs:
   label:
     runs-on: ubuntu-latest
     steps:
-    - name: approve
-      env:
-        GITHUB_CONTEXT: ${{ toJson(github) }}
-      run: |
-        set -e
-        echo ${{ secrets.GITHUB_TOKEN }} | gh auth login --with-token
-        url=$(echo $GITHUB_CONTEXT | jq -r '.event.pull_request._links.html.href')
-        echo PR: $url
-        gh pr review $url --approve
     - uses: actions/labeler@main
       with:
        repo-token: "${{ secrets.GITHUB_TOKEN }}"
From 016f6718572542327f941a884a87245ab5d94725 Mon Sep 17 00:00:00 2001
From: Ze Gan
Date: Thu, 8 Sep 2022 23:45:06 +0800
Subject: [PATCH 33/52] [docker-macsec]: Add dependencies of MACsec (#11770)

Why I did it
If the SWSS service is restarted, the MACsec service should also be restarted;
otherwise the data in wpa_supplicant and orchagent will not be consistent.

How I did it
Add a dependency in docker-macsec.mk.

How to verify it
Manually verify with 'sudo service swss restart'. The MACsec container should
be started after swss; the syslog will look like:

Sep 8 14:36:29.562953 sonic INFO swss.sh[9661]: Starting existing swss container with HWSKU Force10-S6000
Sep 8 14:36:30.024399 sonic DEBUG container: container_start: BEGIN
...
Sep 8 14:36:33.391706 sonic INFO systemd[1]: Starting macsec container...
Sep 8 14:36:33.392925 sonic INFO systemd[1]: Starting Management Framework container...
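For context, the `$(DOCKER_MACSEC)_SERVICE_REQUIRES` and `$(DOCKER_MACSEC)_SERVICE_AFTER`
hooks added below feed into the systemd unit that the build generates for the
container. Assuming the standard per-container service template, the resulting
macsec.service would carry ordering directives roughly like this sketch
(illustrative only, not the literal generated file):

```
[Unit]
Description=MACsec container
Requires=updategraph.service
After=updategraph.service swss.service syncd.service
```

With After=swss.service in place, systemd starts the MACsec container only once
swss has started, which matches the syslog ordering shown above.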
Signed-off-by: Ze Gan --- files/scripts/swss.sh | 16 +++++++++++++++- rules/docker-macsec.mk | 3 +++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/files/scripts/swss.sh b/files/scripts/swss.sh index 3ab4242c902f..3841f77cc30b 100755 --- a/files/scripts/swss.sh +++ b/files/scripts/swss.sh @@ -30,7 +30,7 @@ function read_dependent_services() fi if [[ -f ${ETC_SONIC_PATH}/${SERVICE}_multi_inst_dependent ]]; then - MULTI_INST_DEPENDENT="${MULTI_INST_DEPENDENT} cat ${ETC_SONIC_PATH}/${SERVICE}_multi_inst_dependent" + MULTI_INST_DEPENDENT="${MULTI_INST_DEPENDENT} $(cat ${ETC_SONIC_PATH}/${SERVICE}_multi_inst_dependent)" fi } @@ -308,6 +308,19 @@ function check_peer_gbsyncd() fi } +function check_macsec() +{ + MACSEC_STATE=`show feature status | grep macsec | awk '{print $2}'` + + if [[ ${MACSEC_STATE} == 'enabled' ]]; then + if [ "$DEV" ]; then + DEPENDENT="${DEPENDENT} macsec@${DEV}" + else + DEPENDENT="${DEPENDENT} macsec" + fi + fi +} + if [ "$DEV" ]; then NET_NS="$NAMESPACE_PREFIX$DEV" #name of the network namespace SONIC_DB_CLI="sonic-db-cli -n $NET_NS" @@ -319,6 +332,7 @@ else fi check_peer_gbsyncd +check_macsec read_dependent_services case "$1" in diff --git a/rules/docker-macsec.mk b/rules/docker-macsec.mk index 5db5ea5a41d9..d4cce3ecfcb7 100644 --- a/rules/docker-macsec.mk +++ b/rules/docker-macsec.mk @@ -42,6 +42,9 @@ $(DOCKER_MACSEC)_RUN_OPT += --privileged -t $(DOCKER_MACSEC)_RUN_OPT += -v /etc/sonic:/etc/sonic:ro $(DOCKER_MACSEC)_RUN_OPT += -v /host/warmboot:/var/warmboot +$(DOCKER_MACSEC)_SERVICE_REQUIRES = updategraph +$(DOCKER_MACSEC)_SERVICE_AFTER = swss syncd + $(DOCKER_MACSEC)_CLI_CONFIG_PLUGIN = /cli/config/plugins/macsec.py $(DOCKER_MACSEC)_CLI_SHOW_PLUGIN = /cli/show/plugins/show_macsec.py $(DOCKER_MACSEC)_CLI_CLEAR_PLUGIN = /cli/clear/plugins/clear_macsec_counter.py From dc9eaa53fb1e3a10c733c4ad381379f963cd773a Mon Sep 17 00:00:00 2001 From: bingwang-ms <66248323+bingwang-ms@users.noreply.github.com> Date: Fri, 9 Sep 2022 00:18:26 +0800 Subject: [PATCH 34/52] Map TC6 to Queue 1 for regular traffic (#11904) Why I did it This PR is to update TC_TO_QUEUE_MAP|AZURE for SKU Arista-7050CX3-32S-D48C8 and Arista-7260CX3 T0. The change is only to align the TC_TO_QUEUE_MAP for regular traffic and bounced traffic. It has no impact on business because we have no traffic being mapped to TC2 or TC6. How I did it Update TC_TO_QUEUE_MAP|AZURE , and test cases as well. How to verify it Verified by running test case test_j2files.py /sonic/src/sonic-config-engine$ python3 setup.py test -s tests/test_j2files.py running test ...... 
---------------------------------------------------------------------- Ran 29 tests in 25.390s OK --- .../Arista-7050CX3-32S-D48C8/qos.json.j2 | 2 +- device/common/profiles/th2/7260/BALANCED/qos.json.j2 | 2 +- device/common/profiles/th2/7260/RDMA-CENTRIC/qos.json.j2 | 2 +- .../tests/sample_output/py3/qos-arista7050cx3-dualtor.json | 2 +- .../tests/sample_output/py3/qos-arista7260-dualtor.json | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/qos.json.j2 b/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/qos.json.j2 index 6719911b29e5..040da33dd79f 100644 --- a/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/qos.json.j2 +++ b/device/arista/x86_64-arista_7050cx3_32s/Arista-7050CX3-32S-D48C8/qos.json.j2 @@ -170,7 +170,7 @@ "3": "3", "4": "4", "5": "5", - "6": "6", + "6": "1", "7": "7", "8": "1" }, diff --git a/device/common/profiles/th2/7260/BALANCED/qos.json.j2 b/device/common/profiles/th2/7260/BALANCED/qos.json.j2 index 68daa8ee92f0..750d0fb0ea77 100644 --- a/device/common/profiles/th2/7260/BALANCED/qos.json.j2 +++ b/device/common/profiles/th2/7260/BALANCED/qos.json.j2 @@ -322,7 +322,7 @@ "3": "3", "4": "4", "5": "5", - "6": "6", + "6": "1", "7": "7", "8": "1" }, diff --git a/device/common/profiles/th2/7260/RDMA-CENTRIC/qos.json.j2 b/device/common/profiles/th2/7260/RDMA-CENTRIC/qos.json.j2 index faf682d3c176..771b8305f8f0 100644 --- a/device/common/profiles/th2/7260/RDMA-CENTRIC/qos.json.j2 +++ b/device/common/profiles/th2/7260/RDMA-CENTRIC/qos.json.j2 @@ -321,7 +321,7 @@ "3": "3", "4": "4", "5": "5", - "6": "6", + "6": "1", "7": "7", "8": "1" }, diff --git a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json index 8e1d376f19db..00bf9a9a438c 100644 --- a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json +++ b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7050cx3-dualtor.json @@ -19,7 +19,7 @@ "3": "3", "4": "4", "5": "5", - "6": "6", + "6": "1", "7": "7", "8": "1" }, diff --git a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json index cdb6e8fe842e..4d16791f9287 100644 --- a/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json +++ b/src/sonic-config-engine/tests/sample_output/py3/qos-arista7260-dualtor.json @@ -19,7 +19,7 @@ "3": "3", "4": "4", "5": "5", - "6": "6", + "6": "1", "7": "7", "8": "1" }, From 19155df148fbb49eecd2ce4a3b8f022a66beab76 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Fri, 9 Sep 2022 00:33:01 +0800 Subject: [PATCH 35/52] Fix dbus-run-session command not found issue when install dbus-python (#12009) --- platform/vs/docker-sonic-vs/Dockerfile.j2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/platform/vs/docker-sonic-vs/Dockerfile.j2 b/platform/vs/docker-sonic-vs/Dockerfile.j2 index 6594630cc20f..7585b7f6f6e3 100644 --- a/platform/vs/docker-sonic-vs/Dockerfile.j2 +++ b/platform/vs/docker-sonic-vs/Dockerfile.j2 @@ -72,7 +72,8 @@ RUN apt-get install -y net-tools \ {%- if ENABLE_ASAN == "y" %} libasan5 \ {%- endif %} - libsystemd0 + libsystemd0 \ + dbus # Install redis-server {% if CONFIGURED_ARCH == "armhf" %} From 549bb3d4833425bcc2e8f0666ec0a13a995d54be Mon Sep 17 00:00:00 2001 From: Oleksandr Ivantsiv Date: Fri, 9 Sep 2022 
00:16:11 +0200 Subject: [PATCH 36/52] [services] Update "WantedBy=" section for tacacs-config.timer. (#11893) The timer execution may fail if triggered during a config reload (when the sonic.target is stopped). This might happen in a rare situation if config reload is executed after reboot in a small time slot (for 0 to 30 seconds) before the tacacs-config timer is triggered. To ensure that timer execution will be resumed after a config reload the WantedBy section of the systemd service is updated to describe relation to sonic.target. Signed-off-by: Oleksandr Ivantsiv Signed-off-by: Oleksandr Ivantsiv --- files/build_templates/tacacs-config.timer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/build_templates/tacacs-config.timer b/files/build_templates/tacacs-config.timer index 28314e06f4cb..f8380cbab12d 100644 --- a/files/build_templates/tacacs-config.timer +++ b/files/build_templates/tacacs-config.timer @@ -9,4 +9,4 @@ OnBootSec=5min 30 sec Unit=tacacs-config.service [Install] -WantedBy=timers.target updategraph.service +WantedBy=timers.target sonic.target sonic-delayed.target From acc17db0c804c899dc7fb1dc4c28f14f81f320c1 Mon Sep 17 00:00:00 2001 From: ShiyanWangMS Date: Fri, 9 Sep 2022 22:18:13 +0800 Subject: [PATCH 37/52] Revert PR#11831 (#12035) "Upgrade docker-sonic-mgmt base image from Ubuntu18.04 to 20.04" --- dockers/docker-sonic-mgmt/Dockerfile.j2 | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/dockers/docker-sonic-mgmt/Dockerfile.j2 b/dockers/docker-sonic-mgmt/Dockerfile.j2 index 1275a21c5af2..42aaa3525a64 100755 --- a/dockers/docker-sonic-mgmt/Dockerfile.j2 +++ b/dockers/docker-sonic-mgmt/Dockerfile.j2 @@ -1,5 +1,5 @@ {% set prefix = DEFAULT_CONTAINER_REGISTRY %} -FROM {{ prefix }}ubuntu:20.04 +FROM {{ prefix }}ubuntu:18.04 ENV DEBIAN_FRONTEND=noninteractive @@ -21,6 +21,8 @@ RUN apt-get update && apt-get install -y build-essential \ psmisc \ python \ python-dev \ + python-scapy \ + python-pip \ python3-pip \ python3-venv \ rsyslog \ @@ -29,16 +31,10 @@ RUN apt-get update && apt-get install -y build-essential \ sudo \ tcpdump \ telnet \ - vim \ - python-is-python2 \ - software-properties-common - -RUN add-apt-repository -y universe -RUN curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py \ - && python2 get-pip.py + vim RUN pip install setuptools==44.1.1 -RUN pip install cffi==1.12.0 \ +RUN pip install cffi==1.10.0 \ contextlib2==0.6.0.post1 \ cryptography==3.3.2 \ "future>=0.16.0" \ @@ -100,7 +96,7 @@ RUN pip install cffi==1.12.0 \ && rm -f 1.0.0.tar.gz \ && pip install nnpy \ && pip install dpkt \ - && pip install scapy==2.4.5 --upgrade --ignore-installed + && pip install scapy==2.4.5 --upgrade # Install docker-ce-cli RUN apt-get update \ @@ -131,7 +127,7 @@ debs/{{ deb }}{{' '}} {%- endfor -%} debs/ -RUN dpkg --force-all -i \ +RUN dpkg -i \ {% for deb in docker_sonic_mgmt_debs.split(' ') -%} debs/{{ deb }}{{' '}} {%- endfor %} @@ -197,7 +193,8 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH" ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 PYTHONIOENCODING=UTF-8 -RUN python3 -m pip install --upgrade --ignore-installed pip setuptools==58.4.0 wheel==0.33.6 +RUN python3 -m pip install --upgrade --ignore-installed pip setuptools==58.4.0 + RUN python3 -m pip install setuptools-rust \ aiohttp \ defusedxml \ @@ -240,6 +237,7 @@ RUN python3 -m pip install setuptools-rust \ tabulate \ textfsm==1.1.2 \ virtualenv \ + wheel==0.33.6 \ pysubnettree \ nnpy \ dpkt \ From a2262394397e37f18d402dd6a45a645b35e70528 Mon Sep 17 00:00:00 2001 
From: Ying Xie Date: Fri, 9 Sep 2022 08:41:41 -0700 Subject: [PATCH 38/52] [zebra] ignore route from default table (#12018) Signed-off-by: Ying Xie Signed-off-by: Ying Xie --- ...0009-ignore-route-from-default-table.patch | 30 +++++++++++++++++++ src/sonic-frr/patch/series | 1 + 2 files changed, 31 insertions(+) create mode 100644 src/sonic-frr/patch/0009-ignore-route-from-default-table.patch diff --git a/src/sonic-frr/patch/0009-ignore-route-from-default-table.patch b/src/sonic-frr/patch/0009-ignore-route-from-default-table.patch new file mode 100644 index 000000000000..ec41da74dad8 --- /dev/null +++ b/src/sonic-frr/patch/0009-ignore-route-from-default-table.patch @@ -0,0 +1,30 @@ +From bb3b003840959adf5b5be52e91bc798007c9857a Mon Sep 17 00:00:00 2001 +From: Ying Xie +Date: Thu, 8 Sep 2022 04:20:36 +0000 +Subject: [PATCH] From 776a29e8ab32c1364ee601a8730aabb773b0c86b Mon Sep 17 + 00:00:00 2001 Subject: [PATCH] ignore route from default table + +Signed-off-by: Ying Xie +--- + zebra/zebra_fpm_netlink.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/zebra/zebra_fpm_netlink.c b/zebra/zebra_fpm_netlink.c +index 34be9fb39..d6c875a7e 100644 +--- a/zebra/zebra_fpm_netlink.c ++++ b/zebra/zebra_fpm_netlink.c +@@ -283,6 +283,11 @@ static int netlink_route_info_fill(struct netlink_route_info *ri, int cmd, + rib_table_info(rib_dest_table(dest)); + struct zebra_vrf *zvrf = table_info->zvrf; + ++ if (table_info->table_id == RT_TABLE_DEFAULT) { ++ zfpm_debug("%s: Discard default table route", __func__); ++ return 0; ++ } ++ + memset(ri, 0, sizeof(*ri)); + + ri->prefix = rib_dest_prefix(dest); +-- +2.17.1 + diff --git a/src/sonic-frr/patch/series b/src/sonic-frr/patch/series index 3e8438bf6dd5..a474b918a8cc 100644 --- a/src/sonic-frr/patch/series +++ b/src/sonic-frr/patch/series @@ -8,3 +8,4 @@ 0008-Link-local-scope-was-not-set-while-binding-socket-for-bgp-ipv6-link-local-neighbors.patch Disable-ipv6-src-address-test-in-pceplib.patch cross-compile-changes.patch +0009-ignore-route-from-default-table.patch From 714b1807b6e528836c4af2e24d389d72518c0d7e Mon Sep 17 00:00:00 2001 From: anamehra <54692434+anamehra@users.noreply.github.com> Date: Fri, 9 Sep 2022 12:54:05 -0700 Subject: [PATCH 39/52] Fix radv.conf traceback when VLAN_INTERFACE is not defined (#12034) *Fix the if block scope to prevent traceback due to undefined vlan_list when VLAN_INTERFACE is not defined. 
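To illustrate the scoping bug with a minimal, hypothetical reduction of the
template (not the actual file): vlan_list is assigned only inside the
VLAN_INTERFACE guard, while the loop that renders each interface stanza sat
outside it, so rendering raised an undefined-variable traceback whenever the
config had no VLAN_INTERFACE table:

```
{% if VLAN_INTERFACE %}
{% set vlan_list = {} %}
{# ... populate vlan_list from VLAN_INTERFACE ... #}
{% endif %}
{% for name, prefixes in vlan_list.items() %}
{# fails here when VLAN_INTERFACE is absent: vlan_list was never defined #}
interface {{ name }}
{
};
{% endfor %}
```

Moving the {% endif %} below the loop keeps the iteration inside the guard, so
the whole block is simply skipped when no VLAN interfaces are configured.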
---
 dockers/docker-router-advertiser/radvd.conf.j2 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dockers/docker-router-advertiser/radvd.conf.j2 b/dockers/docker-router-advertiser/radvd.conf.j2
index 3880ef960760..ded5c7e59694 100644
--- a/dockers/docker-router-advertiser/radvd.conf.j2
+++ b/dockers/docker-router-advertiser/radvd.conf.j2
@@ -23,7 +23,6 @@
 {% set _ = vlan_list.update({name: prefix_list}) %}
 {% endif %}
 {% endfor %}
-{% endif %}
 {% for name, prefixes in vlan_list.items() %}
 interface {{ name }}
 {
@@ -47,3 +46,4 @@ interface {{ name }}
 };
 
 {% endfor %}
+{% endif %}
From 966fe0d2108e63e745d35060a50c3b9b6da83bef Mon Sep 17 00:00:00 2001
From: Zain Budhwani <99770260+zbud-msft@users.noreply.github.com>
Date: Fri, 9 Sep 2022 14:23:45 -0700
Subject: [PATCH 40/52] Update gnmi submodule (#11988)

* Update gnmi submodule

* Update gnmi pointer again
---
 src/sonic-gnmi | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/sonic-gnmi b/src/sonic-gnmi
index 92428dac99b2..14f91214fe2b 160000
--- a/src/sonic-gnmi
+++ b/src/sonic-gnmi
@@ -1 +1 @@
-Subproject commit 92428dac99b2d6d97b4c904cb83933cbc8f7e848
+Subproject commit 14f91214fe2b4ba7575bffff58989dbf7df71c48
From 0a8dd3f4f8357f2d470073917fa70cf491dc914b Mon Sep 17 00:00:00 2001
From: FuzailBrcm <51665572+FuzailBrcm@users.noreply.github.com>
Date: Sat, 10 Sep 2022 03:03:12 +0530
Subject: [PATCH 41/52] Adding support for get/set low power mode for QSFPs in PDDF common APIs (#11786)

* Adding support for get/set low power mode for QSFPs in PDDF common APIs

* Adding support for get/set low power mode for QSFPs in PDDF common APIs - Review comments
---
 .../sonic_platform_pddf_base/pddf_sfp.py | 30 ++++++++++++++++---
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/platform/pddf/platform-api-pddf-base/sonic_platform_pddf_base/pddf_sfp.py b/platform/pddf/platform-api-pddf-base/sonic_platform_pddf_base/pddf_sfp.py
index 66fe58543f51..be286bd9eaee 100644
--- a/platform/pddf/platform-api-pddf-base/sonic_platform_pddf_base/pddf_sfp.py
+++ b/platform/pddf/platform-api-pddf-base/sonic_platform_pddf_base/pddf_sfp.py
@@ -10,6 +10,8 @@
 except ImportError as e:
     raise ImportError(str(e) + "- required module not found")
 
+QSFP_PWR_CTRL_ADDR = 93
+
 
 class PddfSfp(SfpOptoeBase):
     """
@@ -194,8 +196,18 @@ def get_lpmode(self):
             else:
                 lpmode = False
         else:
-            # Use common SfpOptoeBase implementation for get_lpmode
-            lpmode = super().get_lpmode()
+            xcvr_id = self._xcvr_api_factory._get_id()
+            if xcvr_id is not None:
+                if xcvr_id == 0x18 or xcvr_id == 0x19 or xcvr_id == 0x1e:
+                    # QSFP-DD or OSFP
+                    # Use common SfpOptoeBase implementation for get_lpmode
+                    lpmode = super().get_lpmode()
+                elif xcvr_id == 0x11 or xcvr_id == 0x0d or xcvr_id == 0x0c:
+                    # QSFP28, QSFP+, QSFP
+                    power_set = self.get_power_set()
+                    power_override = self.get_power_override()
+                    # By default the lpmode pin is pulled high as mentioned in the sff community
+                    return power_set if power_override else True
 
         return lpmode
 
@@ -321,8 +333,18 @@ def set_lpmode(self, lpmode):
         except IOError as e:
             status = False
         else:
-            # Use common SfpOptoeBase implementation for set_lpmode
-            status = super().set_lpmode(lpmode)
+            xcvr_id = self._xcvr_api_factory._get_id()
+            if xcvr_id is not None:
+                if xcvr_id == 0x18 or xcvr_id == 0x19 or xcvr_id == 0x1e:
+                    # QSFP-DD or OSFP
+                    # Use common SfpOptoeBase implementation for set_lpmode
+                    status = super().set_lpmode(lpmode)
+                elif xcvr_id == 0x11 or xcvr_id == 0x0d or xcvr_id == 0x0c:
+                    # QSFP28, QSFP+, QSFP
+                    if lpmode is True:
+                        
self.set_power_override(True, True) + else: + self.set_power_override(True, False) return status From c53972b34864865a854e75df4ec6bc78ebb1c6cf Mon Sep 17 00:00:00 2001 From: Hasan Naqvi <56742004+hasan-brcm@users.noreply.github.com> Date: Fri, 9 Sep 2022 17:05:48 -0700 Subject: [PATCH 42/52] Update submodule to FRR 8.2.2 (#11502) *The sonic-frr was upgraded to FRR 8.2.2 as part of PR #10691. However, sonic-frr/frr submodule was still referring to previous 7.5 version. Update the sonic-frr/frr submodule to 8.2.2 commit id. Fixes issue #11484. --- src/sonic-frr/frr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sonic-frr/frr b/src/sonic-frr/frr index c69608a68083..79188bf710e9 160000 --- a/src/sonic-frr/frr +++ b/src/sonic-frr/frr @@ -1 +1 @@ -Subproject commit c69608a68083d1017257977bd0260bebdb12322f +Subproject commit 79188bf710e92acf42fb5b9b0a2e9593a5ee9b05 From a8076e303bebc7907b54f8f0e612424ce67df9dc Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Mon, 12 Sep 2022 20:31:29 +0800 Subject: [PATCH 43/52] Upgrade the sonic-fips packages to 0.3 (#12040) Why I did it Upgrade the sonic-fips packages to release 0.3 Fix the package timestamp not correct issue --- rules/sonic-fips.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rules/sonic-fips.mk b/rules/sonic-fips.mk index a63e00de3fa5..e5b6e4ad3547 100644 --- a/rules/sonic-fips.mk +++ b/rules/sonic-fips.mk @@ -1,6 +1,6 @@ # fips packages -FIPS_VERSION = 0.2 +FIPS_VERSION = 0.3 FIPS_OPENSSL_VERSION = 1.1.1n-0+deb11u3+fips FIPS_OPENSSH_VERSION = 8.4p1-5+deb11u1+fips FIPS_PYTHON_MAIN_VERSION = 3.9 From b34d94be1f22e94759775d72286769e2feae0f76 Mon Sep 17 00:00:00 2001 From: jcaiMR <111116206+jcaiMR@users.noreply.github.com> Date: Tue, 13 Sep 2022 10:07:17 +0800 Subject: [PATCH 44/52] yang model table DEVICE_NEIGHBOR_METADATA creation (#11894) * yang mode support for neighbor metadata * add description in leaf node * modify description --- src/sonic-config-engine/minigraph.py | 40 +++++-- .../tests/test_minigraph_case.py | 39 +++---- .../tests/test_multinpu_cfggen.py | 12 +- src/sonic-yang-models/setup.py | 2 + .../tests/files/sample_config_db.json | 20 ++++ .../tests/device_neighbor_metadata.json | 15 +++ .../device_neighbor_metadata.json | 107 ++++++++++++++++++ .../sonic-device_neighbor_metadata.yang | 100 ++++++++++++++++ 8 files changed, 301 insertions(+), 34 deletions(-) create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests/device_neighbor_metadata.json create mode 100644 src/sonic-yang-models/tests/yang_model_tests/tests_config/device_neighbor_metadata.json create mode 100644 src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang diff --git a/src/sonic-config-engine/minigraph.py b/src/sonic-config-engine/minigraph.py index 9518678bb5b7..85d9a91030ca 100644 --- a/src/sonic-config-engine/minigraph.py +++ b/src/sonic-config-engine/minigraph.py @@ -260,14 +260,24 @@ def parse_png(png, hname, dpg_ecmp_content = None): if child.tag == str(QName(ns, "Devices")): for device in child.findall(str(QName(ns, "Device"))): (lo_prefix, lo_prefix_v6, mgmt_prefix, mgmt_prefix_v6, name, hwsku, d_type, deployment_id, cluster, d_subtype) = parse_device(device) - device_data = {'lo_addr': lo_prefix, 'type': d_type, 'mgmt_addr': mgmt_prefix, 'hwsku': hwsku} - if cluster: + device_data = {} + if hwsku != None: + device_data['hwsku'] = hwsku + if cluster != None: device_data['cluster'] = cluster - if deployment_id: + if deployment_id != None: 
device_data['deployment_id'] = deployment_id - if lo_prefix_v6: + if lo_prefix != None: + device_data['lo_addr'] = lo_prefix + if lo_prefix_v6 != None: device_data['lo_addr_v6'] = lo_prefix_v6 - if d_subtype: + if mgmt_prefix != None: + device_data['mgmt_addr'] = mgmt_prefix + if mgmt_prefix_v6 != None: + device_data['mgmt_addr_v6'] = mgmt_prefix_v6 + if d_type != None: + device_data['type'] = d_type + if d_subtype != None: device_data['subtype'] = d_subtype devices[name] = device_data @@ -393,13 +403,23 @@ def parse_asic_png(png, asic_name, hostname): if child.tag == str(QName(ns, "Devices")): for device in child.findall(str(QName(ns, "Device"))): (lo_prefix, lo_prefix_v6, mgmt_prefix, mgmt_prefix_v6, name, hwsku, d_type, deployment_id, cluster, _) = parse_device(device) - device_data = {'lo_addr': lo_prefix, 'type': d_type, 'mgmt_addr': mgmt_prefix, 'hwsku': hwsku } - if cluster: + device_data = {} + if hwsku != None: + device_data['hwsku'] = hwsku + if cluster != None: device_data['cluster'] = cluster - if deployment_id: + if deployment_id != None: device_data['deployment_id'] = deployment_id - if lo_prefix_v6: - device_data['lo_addr_v6']= lo_prefix_v6 + if lo_prefix != None: + device_data['lo_addr'] = lo_prefix + if lo_prefix_v6 != None: + device_data['lo_addr_v6'] = lo_prefix_v6 + if mgmt_prefix != None: + device_data['mgmt_addr'] = mgmt_prefix + if mgmt_prefix_v6 != None: + device_data['mgmt_addr_v6'] = mgmt_prefix_v6 + if d_type != None: + device_data['type'] = d_type devices[name] = device_data return (neighbors, devices, port_speeds) diff --git a/src/sonic-config-engine/tests/test_minigraph_case.py b/src/sonic-config-engine/tests/test_minigraph_case.py index 2aa78944163b..9ee8a49db6ae 100644 --- a/src/sonic-config-engine/tests/test_minigraph_case.py +++ b/src/sonic-config-engine/tests/test_minigraph_case.py @@ -203,39 +203,40 @@ def test_minigraph_neighbor_metadata(self): argument = '-m "' + self.sample_graph + '" -p "' + self.port_config + '" -v "DEVICE_NEIGHBOR_METADATA"' expected_table = { - 'switch-01t1': { - 'lo_addr': '10.1.0.186/32', - 'mgmt_addr': '10.7.0.196/26', - 'hwsku': 'Force10-S6000', - 'type': 'LeafRouter', - 'deployment_id': '2' - }, 'switch2-t0': { - 'hwsku': 'Force10-S6000', 'lo_addr': '25.1.1.10/32', 'mgmt_addr': '10.7.0.196/26', + 'hwsku': 'Force10-S6000', 'type': 'ToRRouter' }, - 'server1': { + 'server2': { + 'lo_addr_v6': 'fe80::0002/128', + 'lo_addr': '10.10.10.2/32', + 'mgmt_addr': '10.0.0.2/32', 'hwsku': 'server-sku', - 'lo_addr': '10.10.10.1/32', - 'lo_addr_v6': 'fe80::0001/80', - 'mgmt_addr': '10.0.0.1/32', 'type': 'Server' }, - 'server2': { + 'server1': { + 'lo_addr_v6': 'fe80::0001/80', + 'lo_addr': '10.10.10.1/32', + 'mgmt_addr': '10.0.0.1/32', 'hwsku': 'server-sku', - 'lo_addr': '10.10.10.2/32', - 'lo_addr_v6': 'fe80::0002/128', - 'mgmt_addr': '10.0.0.2/32', 'type': 'Server' }, + 'switch-01t1': { + 'lo_addr': '10.1.0.186/32', + 'deployment_id': '2', + 'hwsku': 'Force10-S6000', + 'type': 'LeafRouter', + 'mgmt_addr': '10.7.0.196/26' + }, 'server1-SC': { - 'hwsku': 'smartcable-sku', - 'lo_addr': '0.0.0.0/0', 'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', - 'type': 'SmartCable' + 'hwsku': 'smartcable-sku', + 'lo_addr': '0.0.0.0/0', + 'type': 'SmartCable', + 'mgmt_addr_v6': '::/0', } } output = self.run_script(argument) diff --git a/src/sonic-config-engine/tests/test_multinpu_cfggen.py b/src/sonic-config-engine/tests/test_multinpu_cfggen.py index a34b0b6cfd78..2bfb879a1ad2 100644 --- a/src/sonic-config-engine/tests/test_multinpu_cfggen.py +++ 
b/src/sonic-config-engine/tests/test_multinpu_cfggen.py @@ -250,10 +250,11 @@ def test_frontend_asic_device_neigh(self): def test_frontend_asic_device_neigh_metadata(self): argument = "-m {} -p {} -n asic0 --var-json \"DEVICE_NEIGHBOR_METADATA\"".format(self.sample_graph, self.port_config[0]) output = json.loads(self.run_script(argument)) + print(output) self.assertDictEqual(output, \ - {'01T2': {'lo_addr': None, 'mgmt_addr': '89.139.132.40', 'hwsku': 'VM', 'type': 'SpineRouter'}, - 'ASIC3': {'lo_addr': '0.0.0.0/0', 'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'type': 'Asic'}, - 'ASIC2': {'lo_addr': '0.0.0.0/0', 'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'type': 'Asic'}}) + {'01T2': {'mgmt_addr': '89.139.132.40', 'hwsku': 'VM', 'type': 'SpineRouter'}, + 'ASIC3': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}, + 'ASIC2': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}}) def test_backend_asic_device_neigh(self): argument = "-m {} -p {} -n asic3 --var-json \"DEVICE_NEIGHBOR\"".format(self.sample_graph, self.port_config[3]) @@ -267,9 +268,10 @@ def test_backend_asic_device_neigh(self): def test_backend_device_neigh_metadata(self): argument = "-m {} -p {} -n asic3 --var-json \"DEVICE_NEIGHBOR_METADATA\"".format(self.sample_graph, self.port_config[3]) output = json.loads(self.run_script(argument)) + print(output) self.assertDictEqual(output, \ - {'ASIC1': {'lo_addr': '0.0.0.0/0', 'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'type': 'Asic'}, - 'ASIC0': {'lo_addr': '0.0.0.0/0', 'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'type': 'Asic'}}) + {'ASIC1': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}, + 'ASIC0': {'lo_addr_v6': '::/0', 'mgmt_addr': '0.0.0.0/0', 'hwsku': 'multi-npu-asic', 'lo_addr': '0.0.0.0/0', 'type': 'Asic', 'mgmt_addr_v6': '::/0'}}) def test_frontend_bgp_neighbor(self): argument = "-m {} -p {} -n asic0 --var-json \"BGP_NEIGHBOR\"".format(self.sample_graph, self.port_config[0]) diff --git a/src/sonic-yang-models/setup.py b/src/sonic-yang-models/setup.py index 961017fc7ca0..aee9d4650c7a 100644 --- a/src/sonic-yang-models/setup.py +++ b/src/sonic-yang-models/setup.py @@ -103,6 +103,7 @@ def run(self): './yang-models/sonic-default-lossless-buffer-parameter.yang', './yang-models/sonic-device_metadata.yang', './yang-models/sonic-device_neighbor.yang', + './yang-models/sonic-device_neighbor_metadata.yang', './yang-models/sonic-dhcpv6-relay.yang', './yang-models/sonic-extension.yang', './yang-models/sonic-flex_counter.yang', @@ -173,6 +174,7 @@ def run(self): './cvlyang-models/sonic-crm.yang', './cvlyang-models/sonic-device_metadata.yang', './cvlyang-models/sonic-device_neighbor.yang', + './cvlyang-models/sonic-device_neighbor_metadata.yang', './cvlyang-models/sonic-extension.yang', './cvlyang-models/sonic-flex_counter.yang', './cvlyang-models/sonic-feature.yang', diff --git a/src/sonic-yang-models/tests/files/sample_config_db.json b/src/sonic-yang-models/tests/files/sample_config_db.json index ddf83de7f0a1..6c1f47d80f51 100644 --- a/src/sonic-yang-models/tests/files/sample_config_db.json +++ b/src/sonic-yang-models/tests/files/sample_config_db.json @@ -391,6 +391,26 @@ "port": "Eth18" } }, + 
"DEVICE_NEIGHBOR_METADATA": { + "dccsw01.nw": { + "lo_addr": "0.0.0.0/0", + "mgmt_addr": "10.184.228.211/32", + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + }, + "dccsw02.nw": { + "mgmt_addr_v6": "2a04:5555:40:a709::2/128", + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + }, + "dccsw03.nw": { + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + } + }, "MGMT_PORT": { "eth0": { "alias": "eth0", diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests/device_neighbor_metadata.json b/src/sonic-yang-models/tests/yang_model_tests/tests/device_neighbor_metadata.json new file mode 100644 index 000000000000..2943a53d1b91 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests/device_neighbor_metadata.json @@ -0,0 +1,15 @@ +{ + "DEVICE_NEIGHBOR_METADATA_TABLE": { + "desc": "DEVICE_NEIGHBOR_METADATA_TABLE config pattern." + }, + "DEVICE_NEIGHBOR_METADATA_TYPE_INCORRECT_PATTERN": { + "desc": "DEVICE_NEIGHBOR_METADATA_TYPE_INCORRECT_PATTERN pattern failure.", + "eStrKey" : "Pattern" + }, + "DEVICE_NEIGHBOR_METADATA_TYPE_CORRECT_PATTERN": { + "desc": "DEVICE_NEIGHBOR_METADATA correct value for Type field" + }, + "DEVICE_NEIGHBOR_METADATA_TYPE_NOT_PROVISIONED_PATTERN": { + "desc": "DEVICE_NEIGHBOR_METADATA value as not-provisioned for Type field" + } +} \ No newline at end of file diff --git a/src/sonic-yang-models/tests/yang_model_tests/tests_config/device_neighbor_metadata.json b/src/sonic-yang-models/tests/yang_model_tests/tests_config/device_neighbor_metadata.json new file mode 100644 index 000000000000..ecc0c35d2208 --- /dev/null +++ b/src/sonic-yang-models/tests/yang_model_tests/tests_config/device_neighbor_metadata.json @@ -0,0 +1,107 @@ +{ + "DEVICE_NEIGHBOR_METADATA_TYPE_INCORRECT_PATTERN": { + "sonic-device_neighbor_metadata:sonic-device_neighbor_metadata": { + "sonic-device_neighbor_metadata:DEVICE_NEIGHBOR_METADATA": { + "DEVICE_NEIGHBOR_METADATA_LIST": [ + { + "name": "Ethernet116", + "hwsku": "Arista", + "type": "ToRrouter" + } + ] + } + } + }, + "DEVICE_NEIGHBOR_METADATA_TYPE_CORRECT_PATTERN": { + "sonic-device_neighbor_metadata:sonic-device_neighbor_metadata": { + "sonic-device_neighbor_metadata:DEVICE_NEIGHBOR_METADATA": { + "DEVICE_NEIGHBOR_METADATA_LIST": [ + { + "name": "Ethernet116", + "hwsku": "Arista", + "type": "BackEndToRRouter" + } + ] + } + } + }, + "DEVICE_NEIGHBOR_METADATA_TYPE_NOT_PROVISIONED_PATTERN": { + "sonic-device_neighbor_metadata:sonic-device_neighbor_metadata": { + "sonic-device_neighbor_metadata:DEVICE_NEIGHBOR_METADATA": { + "DEVICE_NEIGHBOR_METADATA_LIST": [ + { + "name": "Ethernet116", + "hwsku": "Arista", + "type": "not-provisioned" + } + ] + } + } + }, + "DEVICE_NEIGHBOR_METADATA_TABLE": { + "sonic-device_neighbor_metadata:sonic-device_neighbor_metadata": { + "sonic-device_neighbor_metadata:DEVICE_NEIGHBOR_METADATA": { + "DEVICE_NEIGHBOR_METADATA_LIST": [ + { + "lo_addr": "25.77.193.11/32", + "mgmt_addr": "0.0.0.0/0", + "name": "dccsw01.nw", + "hwsku": "Arista", + "type": "ToRRouter", + "deployment_id": "1" + }, + { + "lo_addr": "0.0.0.0/0", + "mgmt_addr": "10.11.150.46/26", + "name": "dccsw02.nw", + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + }, + { + "lo_addr_v6": "2a04:5555:40:a709::2/126", + "mgmt_addr": "10.11.150.47/26", + "name": "dccsw03.nw", + "hwsku": "Arista", + "type": "SpineRouter", + "deployment_id": "1" + }, + { + "name": "dccsw04.nw", + "mgmt_addr_v6": "2a04:5555:40:a708::2/126", + "hwsku": "Arista", + "type": "LeafRouter", + 
"deployment_id": "1" + }, + { + "name": "dccsw05.nw", + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + }, + { + "lo_addr_v6": "2a04:5555:40:a710::2/126", + "name": "dccsw06.nw", + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + }, + { + "lo_addr": "25.77.193.11/32", + "name": "dccsw07.nw", + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + }, + { + "mgmt_addr": "10.11.150.48/26", + "name": "dccsw08.nw", + "hwsku": "Arista", + "type": "LeafRouter", + "deployment_id": "1" + } + ] + } + } + } +} diff --git a/src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang b/src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang new file mode 100644 index 000000000000..76526f801c92 --- /dev/null +++ b/src/sonic-yang-models/yang-models/sonic-device_neighbor_metadata.yang @@ -0,0 +1,100 @@ +module sonic-device_neighbor_metadata { + + yang-version 1.1; + + namespace "http://github.com/Azure/sonic-device_neighbor_metadata"; + prefix device_neighbor_metadata; + + import ietf-yang-types { + prefix yang; + } + + import ietf-inet-types { + prefix inet; + } + + import sonic-types { + prefix stypes; + } + + description "DEVICE_NEIGHBOR_METADATA YANG Module for SONiC OS"; + + revision 2022-08-25 { + description "First Revision"; + } + + container sonic-device_neighbor_metadata { + + container DEVICE_NEIGHBOR_METADATA { + + description "DEVICE_NEIGHBOR_METADATA part of config_db.json"; + + list DEVICE_NEIGHBOR_METADATA_LIST { + + key "name"; + + leaf name { + description "Host name string, max length 255"; + type string { + length 1..255; + } + } + + leaf hwsku { + type stypes:hwsku; + } + + leaf lo_addr { + description "Device loopback ipv4 address, type of ietf-inet + ipv4-prefix or ipv4-address"; + type union { + type inet:ipv4-prefix; + type inet:ipv4-address; + } + } + + leaf lo_addr_v6 { + description "Device loopback ipv6 address, type of ietf-inet + ipv6-prefix or ipv6-address"; + type union { + type inet:ipv6-prefix; + type inet:ipv6-address; + } + } + + leaf mgmt_addr { + description "Device management ipv4 address, type of ietf-inet + ipv4-prefix or ipv4-address"; + type union { + type inet:ipv4-prefix; + type inet:ipv4-address; + } + } + + leaf mgmt_addr_v6 { + description "Device management ipv6 address, type of ietf-inet + ipv6-prefix or ipv6-address"; + type union { + type inet:ipv6-prefix; + type inet:ipv6-address; + } + } + + leaf type { + description "Network element type"; + type string { + pattern "ToRRouter|LeafRouter|SpineChassisFrontendRouter|ChassisBackendRouter|ASIC|Asic|Supervior|MgmtToRRouter|SpineRouter|BackEndToRRouter|BackEndLeafRouter|EPMS|MgmtTsToR|BmcMgmtToRRouter|Server|MiniPower|SmartCable|Ixia|not-provisioned"; + } + } + + leaf deployment_id { + type uint32; + } + } + /* end of list DEVICE_NEIGHBOR_METADATA_LIST */ + } + /* end of container DEVICE_NEIGHBOR_METADATA */ + } + /* end of container sonic-device_neighbor_metadata */ +} +/* end of module sonic-device_neighbor_metadata */ From 7d1b99a8868ed97971377cf6d30adfcf2d0252d6 Mon Sep 17 00:00:00 2001 From: Mai Bui Date: Tue, 13 Sep 2022 06:52:17 -0700 Subject: [PATCH 45/52] Replace unsafe functions in iccpd (#11694) Why I did it Replace unsafe functions in iccpd How I did it Replace memset() by zero initialization Replace strtok() by strtok_r() Signed-off-by: maipbui --- src/iccpd/src/cmd_option.c | 8 ++++---- src/iccpd/src/iccp_ifm.c | 12 ++++-------- src/iccpd/src/iccp_main.c | 3 +-- src/iccpd/src/iccp_netlink.c | 24 
+++++++++--------------- src/iccpd/src/mclagdctl/mclagdctl.c | 3 +-- 5 files changed, 19 insertions(+), 31 deletions(-) diff --git a/src/iccpd/src/cmd_option.c b/src/iccpd/src/cmd_option.c index 596dafb73711..d6e7b14ef7ab 100644 --- a/src/iccpd/src/cmd_option.c +++ b/src/iccpd/src/cmd_option.c @@ -80,26 +80,26 @@ struct CmdOption* cmd_option_add(struct CmdOptionParser* parser, char* opt_name) static void cmd_option_register(struct CmdOptionParser* parser, char* syntax, char* desc) { - char buf[OPTION_MAX_LEN]; + char buf[OPTION_MAX_LEN] = {0}; struct CmdOption* opt = NULL; char* opt_name = NULL; char* param = NULL; char* desc_copy = NULL; char* token = NULL; + char* saveptr; if (parser == NULL) return; if (syntax == NULL) return; - memset(buf, 0, OPTION_MAX_LEN); snprintf(buf, OPTION_MAX_LEN - 1, "%s", syntax); - if ((token = strtok(buf, " ")) == NULL) + if ((token = strtok_r(buf, " ", &saveptr)) == NULL) return; opt_name = strdup(token); - if ((token = strtok(NULL, " ")) != NULL) + if ((token = strtok_r(NULL, " ", &saveptr)) != NULL) param = strdup(token); desc_copy = strdup(desc); if ((opt = cmd_option_find(parser, opt_name)) != NULL) diff --git a/src/iccpd/src/iccp_ifm.c b/src/iccpd/src/iccp_ifm.c index 4349599f5308..5d6d0540e32b 100644 --- a/src/iccpd/src/iccp_ifm.c +++ b/src/iccpd/src/iccp_ifm.c @@ -128,7 +128,7 @@ static void do_arp_learn_from_kernel(struct ndmsg *ndm, struct rtattr *tb[], int uint16_t vlan_id = 0; struct VLAN_ID vlan_key = { 0 }; - char buf[MAX_BUFSIZE]; + char buf[MAX_BUFSIZE] = { 0 }; size_t msg_len = 0; struct LocalInterface *lif_po = NULL, *arp_lif = NULL; @@ -144,7 +144,6 @@ static void do_arp_learn_from_kernel(struct ndmsg *ndm, struct rtattr *tb[], int return; /* create ARP msg*/ - memset(buf, 0, MAX_BUFSIZE); msg_len = sizeof(struct ARPMsg); arp_msg = (struct ARPMsg *)&buf; arp_msg->op_type = NEIGH_SYNC_LIF; @@ -388,7 +387,7 @@ static void do_ndisc_learn_from_kernel(struct ndmsg *ndm, struct rtattr *tb[], i uint16_t vlan_id = 0; struct VLAN_ID vlan_key = { 0 }; - char buf[MAX_BUFSIZE]; + char buf[MAX_BUFSIZE] = { 0 }; size_t msg_len = 0; char addr_null[16] = { 0 }; @@ -406,7 +405,6 @@ static void do_ndisc_learn_from_kernel(struct ndmsg *ndm, struct rtattr *tb[], i return; /* create NDISC msg */ - memset(buf, 0, MAX_BUFSIZE); msg_len = sizeof(struct NDISCMsg); ndisc_msg = (struct NDISCMsg *)&buf; ndisc_msg->op_type = NEIGH_SYNC_LIF; @@ -815,7 +813,7 @@ void do_arp_update_from_reply_packet(unsigned int ifindex, unsigned int addr, ui uint16_t vlan_id = 0; struct VLAN_ID vlan_key = { 0 }; - char buf[MAX_BUFSIZE]; + char buf[MAX_BUFSIZE] = { 0 }; size_t msg_len = 0; struct LocalInterface *lif_po = NULL, *arp_lif = NULL; @@ -830,7 +828,6 @@ void do_arp_update_from_reply_packet(unsigned int ifindex, unsigned int addr, ui return; /* create ARP msg*/ - memset(buf, 0, MAX_BUFSIZE); msg_len = sizeof(struct ARPMsg); arp_msg = (struct ARPMsg*)&buf; arp_msg->op_type = NEIGH_SYNC_LIF; @@ -1033,7 +1030,7 @@ void do_ndisc_update_from_reply_packet(unsigned int ifindex, char *ipv6_addr, ui struct LocalInterface *peer_link_if = NULL; int is_link_local = 0; - char buf[MAX_BUFSIZE]; + char buf[MAX_BUFSIZE] = { 0 }; size_t msg_len = 0; char addr_null[16] = { 0 }; uint16_t vlan_id = 0; @@ -1053,7 +1050,6 @@ void do_ndisc_update_from_reply_packet(unsigned int ifindex, char *ipv6_addr, ui sprintf(mac_str, "%02x:%02x:%02x:%02x:%02x:%02x", mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]); /* create Ndisc msg */ - memset(buf, 0, MAX_BUFSIZE); msg_len = 
sizeof(struct NDISCMsg); ndisc_msg = (struct NDISCMsg *)&buf; ndisc_msg->op_type = NEIGH_SYNC_LIF; diff --git a/src/iccpd/src/iccp_main.c b/src/iccpd/src/iccp_main.c index fe3594583a51..5c852bade142 100644 --- a/src/iccpd/src/iccp_main.c +++ b/src/iccpd/src/iccp_main.c @@ -143,7 +143,7 @@ static int iccpd_signal_init(struct System* sys) int fds[2]; int err; sigset_t ss; - struct sigaction sa; + struct sigaction sa = { 0 }; struct epoll_event event; err = pipe(fds); @@ -171,7 +171,6 @@ static int iccpd_signal_init(struct System* sys) goto close_pipe; } - memset(&sa, 0, sizeof(sa)); sa.sa_handler = iccpd_signal_handler; sigemptyset(&sa.sa_mask); sa.sa_flags = SA_RESTART; diff --git a/src/iccpd/src/iccp_netlink.c b/src/iccpd/src/iccp_netlink.c index 1143f370ffc5..6464a209896a 100644 --- a/src/iccpd/src/iccp_netlink.c +++ b/src/iccpd/src/iccp_netlink.c @@ -461,7 +461,7 @@ void iccp_set_interface_ipadd_mac(struct LocalInterface *lif, char * mac_addr ) { struct IccpSyncdHDr * msg_hdr; mclag_sub_option_hdr_t * sub_msg; - char msg_buf[4096]; + char msg_buf[4096] = { 0 }; struct System *sys; int src_len = 0, dst_len = 0; @@ -470,8 +470,6 @@ void iccp_set_interface_ipadd_mac(struct LocalInterface *lif, char * mac_addr ) if (sys == NULL) return; - memset(msg_buf, 0, 4095); - msg_hdr = (struct IccpSyncdHDr *)msg_buf; msg_hdr->ver = 1; msg_hdr->type = MCLAG_MSG_TYPE_SET_MAC; @@ -572,9 +570,10 @@ static int iccp_netlink_set_portchannel_iff_flag( { int rv, ret_rv = 0; char* token; + char* saveptr; struct LocalInterface* member_if; char *tmp_member_buf = NULL; - + if (!lif_po) return MCLAG_ERROR; @@ -592,7 +591,7 @@ static int iccp_netlink_set_portchannel_iff_flag( lif_po->portchannel_member_buf); } /* Port-channel members are stored as comma separated strings */ - token = strtok(tmp_member_buf, ","); + token = strtok_r(tmp_member_buf, ",", &saveptr); while (token != NULL) { member_if = local_if_find_by_name(token); @@ -616,7 +615,7 @@ static int iccp_netlink_set_portchannel_iff_flag( "Can't find member %s:%s, if_up(%d), location %d", lif_po->name, token, is_iff_up, location); } - token = strtok(NULL, ","); + token = strtok_r(NULL, ",", &saveptr); } if (tmp_member_buf) free(tmp_member_buf); @@ -1942,14 +1941,12 @@ int iccp_receive_ndisc_packet_handler(struct System *sys) struct nd_msg *ndmsg = NULL; struct nd_opt_hdr *nd_opt = NULL; struct in6_addr target; - uint8_t mac_addr[ETHER_ADDR_LEN]; + uint8_t mac_addr[ETHER_ADDR_LEN] = { 0 }; int8_t *opt = NULL; int opt_len = 0, l = 0; int len; struct CSM* csm = NULL; - memset(mac_addr, 0, ETHER_ADDR_LEN); - /* Fill in message and iovec. 
*/ msg.msg_name = (void *)(&from); msg.msg_namelen = sizeof(struct sockaddr_in6); @@ -2375,9 +2372,9 @@ void recover_vlan_if_mac_on_standby(struct LocalInterface* lif_vlan, int dir, ui struct CSM *csm = NULL; struct System* sys = NULL; uint8_t null_mac[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; - char macaddr[64]; - char remote_macaddr[64]; - uint8_t system_mac[ETHER_ADDR_LEN]; + char macaddr[64] = { 0 }; + char remote_macaddr[64] = { 0 }; + uint8_t system_mac[ETHER_ADDR_LEN] = { 0 }; int ret = 0; int vid = 0; @@ -2404,9 +2401,6 @@ void recover_vlan_if_mac_on_standby(struct LocalInterface* lif_vlan, int dir, ui sscanf (lif_vlan->name, "Vlan%d", &vid); - memset(macaddr, 0, 64); - memset(remote_macaddr, 0, 64); - memset(system_mac, 0, ETHER_ADDR_LEN); ICCPD_LOG_DEBUG(__FUNCTION__, " ifname %s, l3_proto %d, dir %d\n", lif_vlan->name, lif_vlan->is_l3_proto_enabled, dir); if (lif_vlan->is_l3_proto_enabled == true) diff --git a/src/iccpd/src/mclagdctl/mclagdctl.c b/src/iccpd/src/mclagdctl/mclagdctl.c index 2eccd944636a..eb6c8878df08 100644 --- a/src/iccpd/src/mclagdctl/mclagdctl.c +++ b/src/iccpd/src/mclagdctl/mclagdctl.c @@ -164,7 +164,7 @@ static struct command_type command_types[] = int mclagdctl_sock_connect() { - struct sockaddr_un addr; + struct sockaddr_un addr = { 0 }; int addrlen = 0; int ret = 0; @@ -181,7 +181,6 @@ int mclagdctl_sock_connect() return MCLAG_ERROR; } - memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; snprintf(addr.sun_path, sizeof(addr.sun_path) - 1, "%s", mclagdctl_sock_path); addrlen = sizeof(addr.sun_family) + strlen(mclagdctl_sock_path); From 055fbf5aaaf4b8269cd598931f2c3fc3a41987cd Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Wed, 14 Sep 2022 04:39:49 +0200 Subject: [PATCH 46/52] [Arista] Update platform submodules (#12020) --- platform/barefoot/sonic-platform-modules-arista | 2 +- platform/broadcom/sonic-platform-modules-arista | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/platform/barefoot/sonic-platform-modules-arista b/platform/barefoot/sonic-platform-modules-arista index b65a69a9e1c2..e12a04b24c5f 160000 --- a/platform/barefoot/sonic-platform-modules-arista +++ b/platform/barefoot/sonic-platform-modules-arista @@ -1 +1 @@ -Subproject commit b65a69a9e1c2c876ba5210ce8b2a1cc9b5c8b18f +Subproject commit e12a04b24c5f752a9ca789d62bb7b94c563e1c4b diff --git a/platform/broadcom/sonic-platform-modules-arista b/platform/broadcom/sonic-platform-modules-arista index b65a69a9e1c2..e12a04b24c5f 160000 --- a/platform/broadcom/sonic-platform-modules-arista +++ b/platform/broadcom/sonic-platform-modules-arista @@ -1 +1 @@ -Subproject commit b65a69a9e1c2c876ba5210ce8b2a1cc9b5c8b18f +Subproject commit e12a04b24c5f752a9ca789d62bb7b94c563e1c4b From c674b3c63486082a0d426ae64e58f70480205ddc Mon Sep 17 00:00:00 2001 From: kannankvs Date: Wed, 14 Sep 2022 10:19:05 +0530 Subject: [PATCH 47/52] [doc]: Updated PR Template for a comment to add label/tag for the feature raised. (#12058) Signed-off-by: kannankvs --- .github/pull_request_template.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 1ff6883573d2..d553f7a4d0c4 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -39,6 +39,8 @@ Write a short (one line) summary that describes the changes in this pull request for inclusion in the changelog: --> +#### Ensure to add label/tag for the feature raised. 
example - [PR#2174](https://github.com/sonic-net/sonic-utilities/pull/2174), where the Generic Config and Update feature has been labelled as GCU.
+
 #### Link to config_db schema for YANG module changes