From 18cda3e062a8aacd0e904caf32a106a8858548ba Mon Sep 17 00:00:00 2001 From: Payal Chaurasiya Date: Fri, 27 Dec 2024 16:50:33 +0530 Subject: [PATCH 1/4] Replace xml.etree.ElementTree.parse with its defusedxml (#1230) * Replace xml.etree.ElementTree.parse with its defusedxml Signed-off-by: Chaurasiya, Payal * convert to json Signed-off-by: Chaurasiya, Payal * Fix memory logs and create pdf Signed-off-by: Chaurasiya, Payal --------- Signed-off-by: Chaurasiya, Payal --- test-requirements.txt | 3 + .../test_suites/memory_logs_tests.py | 27 ++++- tests/end_to_end/utils/generate_report.py | 101 ++++++++++++++++++ tests/end_to_end/utils/summary_helper.py | 4 +- 4 files changed, 131 insertions(+), 4 deletions(-) create mode 100644 tests/end_to_end/utils/generate_report.py diff --git a/test-requirements.txt b/test-requirements.txt index bb2fc0421b..446d67e9af 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,3 +4,6 @@ paramiko pytest==8.3.4 pytest-asyncio==0.25.0 pytest-mock==3.14.0 +defusedxml==0.7.1 +matplotlib==3.10.0 +fpdf==1.7.2 \ No newline at end of file diff --git a/tests/end_to_end/test_suites/memory_logs_tests.py b/tests/end_to_end/test_suites/memory_logs_tests.py index b152cd0852..d0957a8336 100644 --- a/tests/end_to_end/test_suites/memory_logs_tests.py +++ b/tests/end_to_end/test_suites/memory_logs_tests.py @@ -9,6 +9,7 @@ from tests.end_to_end.utils.common_fixtures import fx_federation_tr, fx_federation_tr_dws import tests.end_to_end.utils.constants as constants from tests.end_to_end.utils import federation_helper as fed_helper, ssh_helper as ssh +from tests.end_to_end.utils.generate_report import generate_memory_report log = logging.getLogger(__name__) @@ -78,7 +79,9 @@ def _log_memory_usage(request, fed_obj): ), "Aggregator memory usage file is not available" # Log the aggregator memory usage details - memory_usage_dict = json.load(open(aggregator_memory_usage_file)) + memory_usage_dict = _convert_to_json(aggregator_memory_usage_file) + aggregator_path = os.path.join(fed_obj.workspace_path, "aggregator") + generate_memory_report(memory_usage_dict, aggregator_path) # check memory usage entries for each round assert ( @@ -98,10 +101,30 @@ def _log_memory_usage(request, fed_obj): collaborator_memory_usage_file ), f"Memory usage file for collaborator {collaborator.collaborator_name} is not available" - memory_usage_dict = json.load(open(collaborator_memory_usage_file)) + memory_usage_dict = _convert_to_json(collaborator_memory_usage_file) + collaborator_path = os.path.join(fed_obj.workspace_path, collaborator.name) + generate_memory_report(memory_usage_dict, collaborator_path) assert ( len(memory_usage_dict) == request.config.num_rounds ), f"Memory usage details are not available for all rounds for collaborator {collaborator.collaborator_name}" log.info("Memory usage details are available for all participants") + + +def _convert_to_json(file): + """ + Reads a file containing JSON objects, one per line, and converts them into a list of parsed JSON objects. + + Args: + file (str): The path to the file containing JSON objects. + + Returns: + list: A list of parsed JSON objects. 
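+
+    Example (illustrative values only, not from a real run):
+        A file whose lines are
+            {"round_number": 0, "virtual_memory/used": 1024.4}
+            {"round_number": 1, "virtual_memory/used": 1031.7}
+        is returned as a list of those two objects, in file order.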
+ """ + with open(file, 'r') as infile: + json_objects = infile.readlines() + + # Parse each JSON object + parsed_json_objects = [json.loads(obj) for obj in json_objects] + return parsed_json_objects diff --git a/tests/end_to_end/utils/generate_report.py b/tests/end_to_end/utils/generate_report.py new file mode 100644 index 0000000000..879a103608 --- /dev/null +++ b/tests/end_to_end/utils/generate_report.py @@ -0,0 +1,101 @@ +import pandas as pd +import matplotlib.pyplot as plt +import numpy as np +from scipy.stats import linregress +from fpdf import FPDF + +class PDF(FPDF): + def header(self): + self.set_font('Arial', 'B', 14) + + def chapter_title(self, title): + self.add_page() + self.set_font('Arial', 'B', 14) # Set font to bold for title + self.cell(0, 10, title, 0, 1, 'L') + + def chapter_body(self, body): + self.set_font('Arial', '', 12) + self.multi_cell(0, 10, body) + +def generate_memory_report(memory_usage_dict, workspace_path): + """ + Generates a memory usage report from a CSV file. + + Parameters: + file_path (str): The path to the CSV file containing memory usage data. + + Returns: + None + """ + # Load data + data = pd.DataFrame(memory_usage_dict) + + # Plotting the chart + plt.figure(figsize=(10, 5)) + plt.plot(data['round_number'], data['virtual_memory/used'], marker='o') + plt.title('Memory Usage per Round') + plt.xlabel('round_number') + plt.ylabel('Virtual Memory Used (MB)') + plt.grid(True) + output_path = f"{workspace_path}/mem_usage_plot.png" + plt.savefig(output_path) + plt.close() + + # Calculate statistics + min_mem = round(data['virtual_memory/used'].min(), 2) + max_mem = round(data['virtual_memory/used'].max(), 2) + mean_mem = round(data['virtual_memory/used'].mean(), 2) + variance_mem = round(data['virtual_memory/used'].var(), 2) + std_dev_mem = round(data['virtual_memory/used'].std(), 2) + slope, _, _, _, _ = linregress(data.index, data['virtual_memory/used']) + slope = round(slope, 2) + stats_path = f"{workspace_path}/mem_stats.txt" + with open(stats_path, 'w') as file: + file.write(f"Minimum Memory Used: {min_mem} MB\n") + file.write(f"Maximum Memory Used: {max_mem} MB\n") + file.write(f"Mean Memory Used: {mean_mem} MB\n") + file.write(f"Variance: {variance_mem}\n") + file.write(f"Standard Deviation: {std_dev_mem}\n") + file.write(f"Slope: {slope}\n") + + # Generate PDF report + pdf = PDF() + add_introduction(pdf) + add_chart_analysis(pdf, output_path, data) + add_statistical_overview(pdf, stats_path) + add_conclusion(pdf, slope) + pdf_output_path = f"{workspace_path}/MemAnalysis.pdf" + pdf.output(pdf_output_path) + + print("Memory report generation completed. Report saved to:", pdf_output_path) + +def add_introduction(pdf): + pdf.chapter_title('Introduction') + intro_text = ("The purpose of this memory analysis is to identify memory usage trends and potential bottlenecks. 
" + "This analysis focuses on the relationship between round information and memory usage.") + pdf.chapter_body(intro_text) + +def add_chart_analysis(pdf, output_path, data): + pdf.chapter_title('Chart Analysis') + pdf.image(output_path, w=180) + diffs = data['virtual_memory/used'].diff().round(2) + significant_changes = diffs[diffs.abs() > 500] + for index, value in significant_changes.items(): + pdf.chapter_body(f"Significant memory change: {value} MB at Round {data['round_number'][index]}") + +def add_statistical_overview(pdf, stats_path): + pdf.chapter_title('Statistical Overview') + with open(stats_path, 'r') as file: + stats = file.read() + pdf.chapter_body(stats) + +def add_conclusion(pdf, slope): + pdf.chapter_title('Conclusion') + if slope > 0: + conclusion_text = "The upward slope in the graph indicates a trend of increasing memory usage over rounds." + else: + conclusion_text = "There is no continuous memory growth." + pdf.chapter_body(conclusion_text) + +# Uncomment the following line to run the function directly when this script is executed +# generate_memory_report('/home/sys_tpe_st_svc_acct/memory_leak/mem_info_aggr.csv') diff --git a/tests/end_to_end/utils/summary_helper.py b/tests/end_to_end/utils/summary_helper.py index a832a281c7..25b29ad9fd 100644 --- a/tests/end_to_end/utils/summary_helper.py +++ b/tests/end_to_end/utils/summary_helper.py @@ -1,7 +1,7 @@ # Copyright 2020-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -import xml.etree.ElementTree as ET +from defusedxml.ElementTree import parse as defused_parse from lxml import etree import os from pathlib import Path @@ -17,7 +17,7 @@ print(f"Results XML file not found at {result_xml}. Exiting...") exit(1) -tree = ET.parse(result_xml, parser=parser) +tree = defused_parse(result_xml, parser=parser) # Get the root element testsuites = tree.getroot() From e4305f76a462d22130222cbaf31b2526472de140 Mon Sep 17 00:00:00 2001 From: Preethi Date: Thu, 2 Jan 2025 13:26:19 +0530 Subject: [PATCH 2/4] Build openfl container image and add PR pipeline for all branches (#1231) * Create openfl-docker-build.yml * remove test event --- .github/workflows/docker-bench-security.yml | 1 - .github/workflows/double_ws_export.yml | 1 - .../workflows/experimental_workflow_tests.yml | 5 +- .github/workflows/gandlf.yml | 1 - .github/workflows/hadolint.yml | 1 - .github/workflows/interactive-kvasir.yml | 1 - .github/workflows/interactive-tensorflow.yml | 1 - .github/workflows/lint.yml | 1 - .github/workflows/openfl-docker-build.yml | 47 +++++++++++++++++++ .github/workflows/pki.yml | 1 - .github/workflows/pytest_coverage.yml | 1 - .github/workflows/straggler-handling.yml | 1 - .github/workflows/taskrunner.yml | 1 - .../workflows/taskrunner_eden_pipeline.yml | 1 - .../workflows/tr_docker_gramine_direct.yml | 1 - .github/workflows/tr_docker_native.yml | 1 - .github/workflows/trivy.yml | 5 +- .github/workflows/wf_functional_e2e.yml | 1 - .../workflow_interface_101_mnist.yml | 1 - 19 files changed, 53 insertions(+), 20 deletions(-) create mode 100644 .github/workflows/openfl-docker-build.yml diff --git a/.github/workflows/docker-bench-security.yml b/.github/workflows/docker-bench-security.yml index a64282a309..8b56d199e2 100644 --- a/.github/workflows/docker-bench-security.yml +++ b/.github/workflows/docker-bench-security.yml @@ -2,7 +2,6 @@ name: Docker Bench for Security on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/double_ws_export.yml 
b/.github/workflows/double_ws_export.yml index a33aea7d64..da2304ffa8 100644 --- a/.github/workflows/double_ws_export.yml +++ b/.github/workflows/double_ws_export.yml @@ -5,7 +5,6 @@ name: Double workspace export on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/experimental_workflow_tests.yml b/.github/workflows/experimental_workflow_tests.yml index 84166ab362..5f8391317d 100644 --- a/.github/workflows/experimental_workflow_tests.yml +++ b/.github/workflows/experimental_workflow_tests.yml @@ -2,9 +2,10 @@ name: Workflow Interface Tests on: push: - branches: [ develop ] + branches: + - develop + - v1.7.x pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/gandlf.yml b/.github/workflows/gandlf.yml index 0bd700212a..d80d8f443c 100644 --- a/.github/workflows/gandlf.yml +++ b/.github/workflows/gandlf.yml @@ -5,7 +5,6 @@ name: GaNDLF TaskRunner on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml index a529491bdb..59d852e7c4 100644 --- a/.github/workflows/hadolint.yml +++ b/.github/workflows/hadolint.yml @@ -5,7 +5,6 @@ name: Hadolint Security Scan on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/interactive-kvasir.yml b/.github/workflows/interactive-kvasir.yml index 93ff09126c..12b38d618f 100644 --- a/.github/workflows/interactive-kvasir.yml +++ b/.github/workflows/interactive-kvasir.yml @@ -5,7 +5,6 @@ name: Interactive API - Pytorch Kvasir UNet on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/interactive-tensorflow.yml b/.github/workflows/interactive-tensorflow.yml index 110ee1b175..8ff870b09f 100644 --- a/.github/workflows/interactive-tensorflow.yml +++ b/.github/workflows/interactive-tensorflow.yml @@ -5,7 +5,6 @@ name: Interactive API - Tensorflow MNIST on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 1556bad3b3..36d7fdc41e 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -5,7 +5,6 @@ name: Check code format on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/openfl-docker-build.yml b/.github/workflows/openfl-docker-build.yml new file mode 100644 index 0000000000..ae19d6b37e --- /dev/null +++ b/.github/workflows/openfl-docker-build.yml @@ -0,0 +1,47 @@ +name: Build and Push Openfl Docker Image + +on: + push: + branches: + - develop + - v1.7.x + workflow_dispatch: + inputs: + version: + description: 'Version to tag the Docker image with' + required: false + default: 'latest' + +env: + VERSION: ${{ github.ref == 'refs/heads/develop' && 'latest' || '1.7' }} + +permissions: + contents: read + packages: write + +jobs: + build-and-push: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Log in to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ 
secrets.GITHUB_TOKEN }} + + - name: Build and Push OpenFL Base Image + uses: docker/build-push-action@v4 + with: + context: . + file: openfl-docker/Dockerfile.base + push: true + tags: | + ghcr.io/${{ github.repository }}/openfl:${{ github.event.inputs.version || 'latest' }} diff --git a/.github/workflows/pki.yml b/.github/workflows/pki.yml index 704fbbfcea..bf907273ed 100644 --- a/.github/workflows/pki.yml +++ b/.github/workflows/pki.yml @@ -5,7 +5,6 @@ name: Private Key Infrastructure on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/pytest_coverage.yml b/.github/workflows/pytest_coverage.yml index 18fa9f25fe..a5ea0c14d9 100644 --- a/.github/workflows/pytest_coverage.yml +++ b/.github/workflows/pytest_coverage.yml @@ -5,7 +5,6 @@ name: Pytest and code coverage on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: diff --git a/.github/workflows/straggler-handling.yml b/.github/workflows/straggler-handling.yml index 4f2bdb7dd1..450caf8e8a 100644 --- a/.github/workflows/straggler-handling.yml +++ b/.github/workflows/straggler-handling.yml @@ -5,7 +5,6 @@ name: Straggler Handling Test on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/taskrunner.yml b/.github/workflows/taskrunner.yml index 59abb67251..a9093be4c1 100644 --- a/.github/workflows/taskrunner.yml +++ b/.github/workflows/taskrunner.yml @@ -5,7 +5,6 @@ name: TaskRunner on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/taskrunner_eden_pipeline.yml b/.github/workflows/taskrunner_eden_pipeline.yml index b6d2426c46..3430f89463 100644 --- a/.github/workflows/taskrunner_eden_pipeline.yml +++ b/.github/workflows/taskrunner_eden_pipeline.yml @@ -5,7 +5,6 @@ name: TaskRunner (Eden Compression) on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/tr_docker_gramine_direct.yml b/.github/workflows/tr_docker_gramine_direct.yml index d02526edb7..309351f385 100644 --- a/.github/workflows/tr_docker_gramine_direct.yml +++ b/.github/workflows/tr_docker_gramine_direct.yml @@ -3,7 +3,6 @@ name: TaskRunner (docker/gramine-direct) on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/tr_docker_native.yml b/.github/workflows/tr_docker_native.yml index b3382553ae..f5af424a18 100644 --- a/.github/workflows/tr_docker_native.yml +++ b/.github/workflows/tr_docker_native.yml @@ -3,7 +3,6 @@ name: TaskRunner (docker/native) on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] permissions: diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 7c26b672d3..039e241652 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -1,9 +1,10 @@ name: Trivy on: push: - branches: [ develop ] + branches: + - develop + - v1.7.x pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] jobs: build: diff --git a/.github/workflows/wf_functional_e2e.yml b/.github/workflows/wf_functional_e2e.yml index c51b7acc03..923aa73bae 100644 --- a/.github/workflows/wf_functional_e2e.yml +++ b/.github/workflows/wf_functional_e2e.yml @@ -5,7 +5,6 @@ name: 
Workflow_Functional_E2E on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: diff --git a/.github/workflows/workflow_interface_101_mnist.yml b/.github/workflows/workflow_interface_101_mnist.yml index 8f6e5e7267..57e1dae46e 100644 --- a/.github/workflows/workflow_interface_101_mnist.yml +++ b/.github/workflows/workflow_interface_101_mnist.yml @@ -6,7 +6,6 @@ name: Workflow Interface 101 MNIST Notebook on: pull_request: - branches: [ develop ] types: [opened, synchronize, reopened, ready_for_review] workflow_dispatch: From ca94e42fce924f2c91be61b320cfd94c76a75531 Mon Sep 17 00:00:00 2001 From: Noopur Date: Fri, 3 Jan 2025 10:05:27 +0530 Subject: [PATCH 3/4] Using aggregator metric file for summary (#1239) Signed-off-by: noopur --- tests/end_to_end/models/model_owner.py | 3 + .../test_suites/memory_logs_tests.py | 25 +----- tests/end_to_end/utils/generate_report.py | 80 ++++++++++++------- tests/end_to_end/utils/summary_helper.py | 32 +++----- 4 files changed, 70 insertions(+), 70 deletions(-) diff --git a/tests/end_to_end/models/model_owner.py b/tests/end_to_end/models/model_owner.py index 8e66968ea9..f2d77f360a 100644 --- a/tests/end_to_end/models/model_owner.py +++ b/tests/end_to_end/models/model_owner.py @@ -145,6 +145,9 @@ def modify_plan(self, param_config, plan_path): # Memory Leak related data["aggregator"]["settings"]["log_memory_usage"] = self.log_memory_usage data["collaborator"]["settings"]["log_memory_usage"] = self.log_memory_usage + # Aggregator and collaborators metric logging related + data["aggregator"]["settings"]["write_logs"] = True + data["collaborator"]["settings"]["write_logs"] = True data["data_loader"]["settings"]["collaborator_count"] = int(self.num_collaborators) data["network"]["settings"]["require_client_auth"] = param_config.require_client_auth diff --git a/tests/end_to_end/test_suites/memory_logs_tests.py b/tests/end_to_end/test_suites/memory_logs_tests.py index d0957a8336..662099273f 100644 --- a/tests/end_to_end/test_suites/memory_logs_tests.py +++ b/tests/end_to_end/test_suites/memory_logs_tests.py @@ -4,12 +4,11 @@ import pytest import logging import os -import json from tests.end_to_end.utils.common_fixtures import fx_federation_tr, fx_federation_tr_dws import tests.end_to_end.utils.constants as constants from tests.end_to_end.utils import federation_helper as fed_helper, ssh_helper as ssh -from tests.end_to_end.utils.generate_report import generate_memory_report +from tests.end_to_end.utils.generate_report import generate_memory_report, convert_to_json log = logging.getLogger(__name__) @@ -79,7 +78,7 @@ def _log_memory_usage(request, fed_obj): ), "Aggregator memory usage file is not available" # Log the aggregator memory usage details - memory_usage_dict = _convert_to_json(aggregator_memory_usage_file) + memory_usage_dict = convert_to_json(aggregator_memory_usage_file) aggregator_path = os.path.join(fed_obj.workspace_path, "aggregator") generate_memory_report(memory_usage_dict, aggregator_path) @@ -101,7 +100,7 @@ def _log_memory_usage(request, fed_obj): collaborator_memory_usage_file ), f"Memory usage file for collaborator {collaborator.collaborator_name} is not available" - memory_usage_dict = _convert_to_json(collaborator_memory_usage_file) + memory_usage_dict = convert_to_json(collaborator_memory_usage_file) collaborator_path = os.path.join(fed_obj.workspace_path, collaborator.name) generate_memory_report(memory_usage_dict, collaborator_path) @@ -110,21 +109,3 @@ def 
_log_memory_usage(request, fed_obj): ), f"Memory usage details are not available for all rounds for collaborator {collaborator.collaborator_name}" log.info("Memory usage details are available for all participants") - - -def _convert_to_json(file): - """ - Reads a file containing JSON objects, one per line, and converts them into a list of parsed JSON objects. - - Args: - file (str): The path to the file containing JSON objects. - - Returns: - list: A list of parsed JSON objects. - """ - with open(file, 'r') as infile: - json_objects = infile.readlines() - - # Parse each JSON object - parsed_json_objects = [json.loads(obj) for obj in json_objects] - return parsed_json_objects diff --git a/tests/end_to_end/utils/generate_report.py b/tests/end_to_end/utils/generate_report.py index 879a103608..1e014fa3d1 100644 --- a/tests/end_to_end/utils/generate_report.py +++ b/tests/end_to_end/utils/generate_report.py @@ -1,22 +1,27 @@ +# Copyright 2020-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + import pandas as pd import matplotlib.pyplot as plt -import numpy as np +import json from scipy.stats import linregress from fpdf import FPDF + class PDF(FPDF): def header(self): - self.set_font('Arial', 'B', 14) + self.set_font("Arial", "B", 14) def chapter_title(self, title): self.add_page() - self.set_font('Arial', 'B', 14) # Set font to bold for title - self.cell(0, 10, title, 0, 1, 'L') + self.set_font("Arial", "B", 14) # Set font to bold for title + self.cell(0, 10, title, 0, 1, "L") def chapter_body(self, body): - self.set_font('Arial', '', 12) + self.set_font("Arial", "", 12) self.multi_cell(0, 10, body) + def generate_memory_report(memory_usage_dict, workspace_path): """ Generates a memory usage report from a CSV file. @@ -32,25 +37,25 @@ def generate_memory_report(memory_usage_dict, workspace_path): # Plotting the chart plt.figure(figsize=(10, 5)) - plt.plot(data['round_number'], data['virtual_memory/used'], marker='o') - plt.title('Memory Usage per Round') - plt.xlabel('round_number') - plt.ylabel('Virtual Memory Used (MB)') + plt.plot(data["round_number"], data["virtual_memory/used"], marker="o") + plt.title("Memory Usage per Round") + plt.xlabel("round_number") + plt.ylabel("Virtual Memory Used (MB)") plt.grid(True) output_path = f"{workspace_path}/mem_usage_plot.png" plt.savefig(output_path) plt.close() # Calculate statistics - min_mem = round(data['virtual_memory/used'].min(), 2) - max_mem = round(data['virtual_memory/used'].max(), 2) - mean_mem = round(data['virtual_memory/used'].mean(), 2) - variance_mem = round(data['virtual_memory/used'].var(), 2) - std_dev_mem = round(data['virtual_memory/used'].std(), 2) - slope, _, _, _, _ = linregress(data.index, data['virtual_memory/used']) + min_mem = round(data["virtual_memory/used"].min(), 2) + max_mem = round(data["virtual_memory/used"].max(), 2) + mean_mem = round(data["virtual_memory/used"].mean(), 2) + variance_mem = round(data["virtual_memory/used"].var(), 2) + std_dev_mem = round(data["virtual_memory/used"].std(), 2) + slope, _, _, _, _ = linregress(data.index, data["virtual_memory/used"]) slope = round(slope, 2) stats_path = f"{workspace_path}/mem_stats.txt" - with open(stats_path, 'w') as file: + with open(stats_path, "w") as file: file.write(f"Minimum Memory Used: {min_mem} MB\n") file.write(f"Maximum Memory Used: {max_mem} MB\n") file.write(f"Mean Memory Used: {mean_mem} MB\n") @@ -69,33 +74,54 @@ def generate_memory_report(memory_usage_dict, workspace_path): print("Memory report generation completed. 
Report saved to:", pdf_output_path) + def add_introduction(pdf): - pdf.chapter_title('Introduction') - intro_text = ("The purpose of this memory analysis is to identify memory usage trends and potential bottlenecks. " - "This analysis focuses on the relationship between round information and memory usage.") + pdf.chapter_title("Introduction") + intro_text = ( + "The purpose of this memory analysis is to identify memory usage trends and potential bottlenecks. " + "This analysis focuses on the relationship between round information and memory usage." + ) pdf.chapter_body(intro_text) + def add_chart_analysis(pdf, output_path, data): - pdf.chapter_title('Chart Analysis') + pdf.chapter_title("Chart Analysis") pdf.image(output_path, w=180) - diffs = data['virtual_memory/used'].diff().round(2) + diffs = data["virtual_memory/used"].diff().round(2) significant_changes = diffs[diffs.abs() > 500] for index, value in significant_changes.items(): - pdf.chapter_body(f"Significant memory change: {value} MB at Round {data['round_number'][index]}") + pdf.chapter_body( + f"Significant memory change: {value} MB at Round {data['round_number'][index]}" + ) + def add_statistical_overview(pdf, stats_path): - pdf.chapter_title('Statistical Overview') - with open(stats_path, 'r') as file: + pdf.chapter_title("Statistical Overview") + with open(stats_path, "r") as file: stats = file.read() pdf.chapter_body(stats) + def add_conclusion(pdf, slope): - pdf.chapter_title('Conclusion') + pdf.chapter_title("Conclusion") if slope > 0: conclusion_text = "The upward slope in the graph indicates a trend of increasing memory usage over rounds." else: conclusion_text = "There is no continuous memory growth." pdf.chapter_body(conclusion_text) -# Uncomment the following line to run the function directly when this script is executed -# generate_memory_report('/home/sys_tpe_st_svc_acct/memory_leak/mem_info_aggr.csv') + +def convert_to_json(file): + """ + Reads a file containing JSON objects, one per line, and converts them into a list of parsed JSON objects. + Args: + file (str): The path to the file containing JSON objects. + Returns: + list: A list of parsed JSON objects. 
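+
+    Note:
+        Each line must hold one complete JSON document; json.loads raises
+        json.JSONDecodeError on a malformed or truncated line.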
+ """ + with open(file, "r") as infile: + json_objects = infile.readlines() + + # Parse each JSON object + parsed_json_objects = [json.loads(obj) for obj in json_objects] + return parsed_json_objects diff --git a/tests/end_to_end/utils/summary_helper.py b/tests/end_to_end/utils/summary_helper.py index 25b29ad9fd..cfdbc17a9e 100644 --- a/tests/end_to_end/utils/summary_helper.py +++ b/tests/end_to_end/utils/summary_helper.py @@ -7,6 +7,7 @@ from pathlib import Path import tests.end_to_end.utils.constants as constants +from tests.end_to_end.utils.generate_report import convert_to_json # Initialize the XML parser parser = etree.XMLParser(recover=True, encoding="utf-8") @@ -38,26 +39,10 @@ def get_aggregated_accuracy(agg_log_file): ) return agg_accuracy - # Example line(s) containing spaces and special characters: - """ - METRIC {'metric_origin': 'aggregator', 'task_name': 'aggregated_model_validation', 'metric_name': 'accuracy', 'metric_value': aggregator.py:933 - 0.15911591053009033, 'round': 0} - """ - try: - with open(agg_log_file, 'r') as f: - for line in f: - if "'metric_origin': 'aggregator'" in line and "aggregated_model_validation" in line: - # In Python versions < 3.11, aggregator.py file name appears in the line - # whereas in Python version 3.11, it is utils.py - line = line.split("aggregator.py:")[0].strip() - line = line.split("utils.py:")[0].strip() - # If the line does not contain closing bracket "}", then concatenate the next line - reqd_line = line if "}" in line else line + next(f).strip() - agg_accuracy = eval(reqd_line.split("METRIC")[1].strip('"'))["metric_value"] - except Exception as e: - # Do not fail the test if the accuracy cannot be fetched - print(f"Error while reading aggregator log file: {e}") - + agg_accuracy_dict = convert_to_json(agg_log_file) + agg_accuracy = agg_accuracy_dict[-1].get( + "aggregator/aggregated_model_validation/accuracy", "Not Found" + ) return agg_accuracy @@ -153,7 +138,12 @@ def main(): # Assumption - result directory is present in the home directory agg_log_file = os.path.join( - result_path, model_name, "aggregator", "workspace", "aggregator.log" + result_path, + model_name, + "aggregator", + "workspace", + "logs", + "aggregator_metrics.txt", ) agg_accuracy = get_aggregated_accuracy(agg_log_file) From 605b2b6e810a75d2c80f70790fa5f7906bd01e17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 3 Jan 2025 10:02:12 -0800 Subject: [PATCH 4/4] Bump pytest-asyncio from 0.25.0 to 0.25.1 (#1241) Bumps [pytest-asyncio](https://github.com/pytest-dev/pytest-asyncio) from 0.25.0 to 0.25.1. - [Release notes](https://github.com/pytest-dev/pytest-asyncio/releases) - [Commits](https://github.com/pytest-dev/pytest-asyncio/compare/v0.25.0...v0.25.1) --- updated-dependencies: - dependency-name: pytest-asyncio dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-requirements.txt b/test-requirements.txt index 446d67e9af..c17ddf1364 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,7 +2,7 @@ docker lxml==5.3.0 paramiko pytest==8.3.4 -pytest-asyncio==0.25.0 +pytest-asyncio==0.25.1 pytest-mock==3.14.0 defusedxml==0.7.1 matplotlib==3.10.0
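
Taken together, patch 3 replaces the brittle scraping of "METRIC ..." lines out of aggregator.log with a structured read of the aggregator's JSON-lines metrics file. The sketch below shows that flow end to end. It is a minimal illustration, not OpenFL API: the helper name is invented here, and the path and metric key simply mirror the ones summary_helper.py constructs ("results" and "torch_cnn_mnist" stand in for result_path and model_name).

import json
import os

def read_last_metric(metrics_file, key):
    # The metrics file holds one JSON object per line, one line per round.
    with open(metrics_file, "r") as f:
        rounds = [json.loads(line) for line in f if line.strip()]
    # Take the newest round's entry; fall back the way summary_helper does.
    return rounds[-1].get(key, "Not Found") if rounds else "Not Found"

metrics_file = os.path.join(
    "results", "torch_cnn_mnist", "aggregator", "workspace",
    "logs", "aggregator_metrics.txt",
)
if os.path.exists(metrics_file):
    accuracy = read_last_metric(
        metrics_file, "aggregator/aggregated_model_validation/accuracy"
    )
    print("Aggregated accuracy:", accuracy)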