diff --git a/.devcontainer/prepare_workspace.sh b/.devcontainer/prepare_workspace.sh index 2a2e5ffdef..0a4207a5b2 100755 --- a/.devcontainer/prepare_workspace.sh +++ b/.devcontainer/prepare_workspace.sh @@ -19,4 +19,4 @@ export GITA_PROJECT_HOME # Generate workspace metadata files from known_good.json: # - .gita-workspace.csv -python3 tools/known_good_to_workspace_metadata.py --known-good known_good.json --gita-workspace .gita-workspace.csv +python3 scripts/known_good/known_good_to_workspace_metadata.py --known-good known_good.json --gita-workspace .gita-workspace.csv diff --git a/.github/workflows/reusable_integration-build.yml b/.github/workflows/reusable_integration-build.yml index 0b25f1c92d..f1723223f7 100644 --- a/.github/workflows/reusable_integration-build.yml +++ b/.github/workflows/reusable_integration-build.yml @@ -7,14 +7,14 @@ # What it does: # - Checks out the reference integration repository # - Updates score_modules.MODULE.bazel from the provided known_good.json -# - Builds all referenced modules (via scripts/integration_test.sh and Bazel) +# - Builds all referenced modules (via scripts/integration_test.py and Bazel) # - Runs integration tests # - Uploads logs from _logs/ as artifact: bazel-build-logs-${{ inputs.config }} # # Inputs: # - known_good (string, required): JSON content used to pin module SHAs. -# - config (string, optional, default: bl-x86_64-linux): Bazel config passed as -# CONFIG to scripts/integration_test.sh. +# - config (string, optional, default: bl-x86_64-linux): Bazel config passed to +# scripts/integration_test.py. # - repo_runner_labels (string, optional): Runner label(s). Accepts either a # single label string (e.g., ubuntu-latest) or a JSON string representing a # label or an array of labels (e.g., "\"ubuntu-latest\"" or @@ -22,7 +22,7 @@ # - target_branch (string, optional, default: main): Ref/branch to checkout. # # Repository Variables: -# - reference_integration_repo (optional): Repository to checkout (owner/repo). +# - REFERENCE_INTEGRATION_REPO (optional): Repository to checkout (owner/repo). 
# Default: eclipse-score/reference_integration # # Secrets: @@ -77,7 +77,7 @@ on: default: 'main' env: - REFERENCE_INTEGRATION_REPO: ${{ vars.reference_integration_repo != '' && vars.reference_integration_repo || 'eclipse-score/reference_integration' }} + REFERENCE_INTEGRATION_REPO: ${{ vars.REFERENCE_INTEGRATION_REPO != '' && vars.REFERENCE_INTEGRATION_REPO || 'eclipse-score/reference_integration' }} jobs: integration-test: @@ -127,14 +127,14 @@ jobs: echo "::endgroup::" echo "::group::update score_modules.MODULE.bazel" - python3 tools/update_module_from_known_good.py --known known_good.updated.json + python3 scripts/known_good/update_module_from_known_good.py --known known_good.updated.json cat score_modules.MODULE.bazel echo "::endgroup::" env: GITHUB_TOKEN: ${{ secrets.REPO_READ_TOKEN != '' && secrets.REPO_READ_TOKEN || github.token }} - name: Bazel build targets run: | - CONFIG="${{ inputs.config }}" scripts/integration_test.sh --known-good known_good.updated.json + python3 scripts/integration_test.py --known-good known_good.updated.json --config "${{ inputs.config }}" env: GITHUB_TOKEN: ${{ secrets.REPO_READ_TOKEN != '' && secrets.REPO_READ_TOKEN || github.token }} - name: Show disk space after build diff --git a/.github/workflows/reusable_smoke-test.yml b/.github/workflows/reusable_smoke-test.yml index c76d756272..ac5326c99d 100644 --- a/.github/workflows/reusable_smoke-test.yml +++ b/.github/workflows/reusable_smoke-test.yml @@ -95,14 +95,14 @@ jobs: run: | if [ "${{ github.repository }}" != "${{ env.REFERENCE_INTEGRATION_REPO }}" ]; then echo "Overriding ${{ inputs.module_name }} with current PR" - python3 tools/override_known_good_repo.py \ + python3 scripts/known_good/override_known_good_repo.py \ --known known_good.json \ --output known_good.updated.json \ --module-override ${{ inputs.module_name }}@${{ github.event.repository.clone_url }}@${{ github.sha }} else echo "Testing reference integration repository itself - updating to latest commits" echo "::group::get latest commits from module branches" - python3 tools/update_module_latest.py --output known_good.updated.json + python3 scripts/known_good/update_module_latest.py --output known_good.updated.json cat known_good.updated.json echo "::endgroup::" fi diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 5234f5b4f6..f67340a34d 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -6,7 +6,7 @@ "type": "shell", "command": "python3", "args": [ - "tools/known_good_to_workspace_metadata.py" + "scripts/known_good/known_good_to_workspace_metadata.py" ], "problemMatcher": [] }, @@ -15,7 +15,7 @@ "type": "shell", "command": "python3", "args": [ - "tools/update_module_from_known_good.py", + "scripts/known_good/update_module_from_known_good.py", "--override-type", "local_path" ], @@ -26,7 +26,7 @@ "type": "shell", "command": "python3", "args": [ - "tools/update_module_from_known_good.py", + "scripts/known_good/update_module_from_known_good.py", "--override-type", "git" ] diff --git a/README.md b/README.md index e2f67be8f3..18d238158f 100644 --- a/README.md +++ b/README.md @@ -202,4 +202,4 @@ local_path_override(module_name = "score_tooling", path = "../tooling") Use `scripts/generate_rust_analyzer_support.sh` to generate rust_analyzer settings that will let VS Code work. ## 🗂 Notes -Keep this file updated as integration issues are resolved. Prefer converting ad-hoc shell steps into Bazel rules or documented scripts under `tools/` for repeatability. +Keep this file updated as integration issues are resolved. 
Prefer converting ad-hoc shell steps into Bazel rules or documented scripts under `scripts/` for repeatability.
diff --git a/build_config.json b/build_config.json
new file mode 100644
index 0000000000..85c986fea1
--- /dev/null
+++ b/build_config.json
@@ -0,0 +1,28 @@
+{
+    "modules": {
+        "score_baselibs": {
+            "build_targets": "@score_baselibs//score/..."
+        },
+        "score_communication": {
+            "build_targets": "@score_communication//score/mw/com:com"
+        },
+        "score_persistency": {
+            "build_targets": "@score_persistency//src/cpp/src/... @score_persistency//src/rust/..."
+        },
+        "score_kyron": {
+            "build_targets": "@score_kyron//src/..."
+        },
+        "score_orchestrator": {
+            "build_targets": "@score_orchestrator//src/..."
+        },
+        "score_test_scenarios": {
+            "build_targets": "@score_test_scenarios//test_scenarios_rust:test_scenarios_rust @score_test_scenarios//test_scenarios_cpp:test_scenarios_cpp"
+        },
+        "score_feo": {
+            "build_targets": "-- @score_feo//... -@score_feo//:docs -@score_feo//:ide_support -@score_feo//:needs_json"
+        },
+        "score_logging": {
+            "build_targets": "@score_logging//score/... --@score_baselibs//score/memory/shared/flags:use_typedshmd=False --@score_baselibs//score/json:base_library=nlohmann --@score_logging//score/datarouter/build_configuration_flags:persistent_logging=False --@score_logging//score/datarouter/build_configuration_flags:persistent_config_feature_enabled=False --@score_logging//score/datarouter/build_configuration_flags:enable_nonverbose_dlt=False --@score_logging//score/datarouter/build_configuration_flags:enable_dynamic_configuration_in_datarouter=False --@score_logging//score/datarouter/build_configuration_flags:dlt_file_transfer_feature=False --@score_logging//score/datarouter/build_configuration_flags:use_local_vlan=True"
+        }
+    }
+}
diff --git a/scripts/__init__.py b/scripts/__init__.py
new file mode 100644
index 0000000000..7994ee43e2
--- /dev/null
+++ b/scripts/__init__.py
@@ -0,0 +1 @@
+# Scripts package
diff --git a/scripts/integration_test.py b/scripts/integration_test.py
new file mode 100755
index 0000000000..e08146f0f1
--- /dev/null
+++ b/scripts/integration_test.py
@@ -0,0 +1,366 @@
+#!/usr/bin/env python3
+"""Integration build script for SCORE modules.
+
+Captures warning counts for regression tracking and generates build summaries.
+"""
+
+import argparse
+import os
+import re
+import shutil
+import subprocess
+import sys
+import time
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, Optional, Tuple
+
+from models.build_config import load_build_config
+from known_good.models import Module
+from known_good.models.known_good import load_known_good
+
+repo_root = Path(__file__).parent.parent
+
+
+def get_module_version_gh(repo_url: str, commit_hash: str) -> Optional[str]:
+    """Get version tag from GitHub API for a commit hash.
+
+    Args:
+        repo_url: GitHub repository URL
+        commit_hash: Commit hash to look up
+
+    Returns:
+        Tag name if found, None otherwise
+    """
+    # Check if gh CLI is installed
+    if shutil.which('gh') is None:
+        print("::warning::gh CLI not found. 
Install it to resolve commit hashes to tags.") + return None + + # Extract owner/repo from GitHub URL + match = re.search(r'github\.com[/:]([^/]+)/([^/.]+)(\.git)?$', repo_url) + if not match: + print(f"::warning::Invalid repo URL format: {repo_url}") + return None + + owner, repo = match.group(1), match.group(2) + + print(f"::debug::Querying GitHub API: repos/{owner}/{repo}/tags for commit {commit_hash}") + + try: + result = subprocess.run( + ['gh', 'api', f'repos/{owner}/{repo}/tags', '--jq', + f'.[] | select(.commit.sha == "{commit_hash}") | .name'], + capture_output=True, + text=True, + timeout=10 + ) + + if result.returncode == 0 and result.stdout.strip(): + tag = result.stdout.strip().split('\n')[0] + print(f"::debug::Found tag: {tag}") + return tag + + print(f"::debug::No tag found for commit {commit_hash}") + return None + except Exception as e: + print(f"::warning::Error querying GitHub API: {e}") + return None + + +def truncate_hash(hash_str: str, length: int = 8) -> str: + """Truncate hash to specified length. + + Args: + hash_str: Full hash string + length: Maximum length + + Returns: + Truncated hash + """ + if len(hash_str) > length: + return hash_str[:length] + return hash_str + + +def count_pattern(log_file: Path, pattern: str) -> int: + """Count lines matching pattern in log file. + + Args: + log_file: Path to log file + pattern: Pattern to search for (case-insensitive) + + Returns: + Number of matching lines found + """ + if not log_file.exists(): + return 0 + + count = 0 + with open(log_file, 'r') as f: + for line in f: + if pattern in line.lower(): + count += 1 + return count + + +def get_identifier_and_link(module: Optional[Module]) -> Tuple[Optional[str], str]: + """Get display identifier and link for a module. + + Args: + module: Module instance or None + + Returns: + Tuple of (identifier, link_url) + """ + if not module or not module.hash: + return None, "" + + if module.version: + identifier = module.version + link = f"{module.repo}/releases/tag/{module.version}" if module.repo else "" + else: + # Try to get version from GitHub + if module.repo: + gh_version = get_module_version_gh(module.repo, module.hash) + if gh_version: + identifier = gh_version + link = f"{module.repo}/releases/tag/{gh_version}" + else: + identifier = truncate_hash(module.hash) + link = f"{module.repo}/tree/{module.hash}" + else: + identifier = truncate_hash(module.hash) + link = "" + + return identifier, link + + +def build_group(group_name: str, targets: str, config: str, log_file: Path) -> Tuple[int, int]: + """Build a group of Bazel targets. 
+
+    Args:
+        group_name: Name of the build group
+        targets: Bazel targets to build
+        config: Bazel config to use
+        log_file: Path to log file
+
+    Returns:
+        Tuple of (exit_code, duration_seconds)
+    """
+    print(f"--- Building group: {group_name} ---")
+
+    # Build command
+    cmd = ['bazel', 'build', '--verbose_failures', f'--config={config}'] + targets.split()
+
+    print(f"bazel build --verbose_failures --config {config} {targets}")
+    print(f"::group::Bazel build ({group_name})")
+
+    start_time = time.time()
+
+    # Run build and capture output
+    with open(log_file, 'w') as f:
+        process = subprocess.Popen(
+            cmd,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            text=True
+        )
+
+        # Stream output to both terminal and file
+        if process.stdout:
+            for line in process.stdout:
+                print(line, end='')
+                f.write(line)
+
+        process.wait()
+
+    end_time = time.time()
+    duration = int(end_time - start_time)
+
+    print("::endgroup::")
+
+    return process.returncode, duration
+
+
+def format_commit_version_cell(
+    group_name: str,
+    old_modules: Dict[str, Module],
+    new_modules: Dict[str, Module]
+) -> str:
+    """Format the commit/version cell for the summary table.
+
+    Args:
+        group_name: Name of the module group
+        old_modules: Modules from old known_good.json
+        new_modules: Modules from new known_good.json
+
+    Returns:
+        Formatted markdown cell content
+    """
+    # Get module info or defaults
+    old_module = old_modules.get(group_name)
+    new_module = new_modules.get(group_name)
+
+    if new_module is None or new_module.hash is None:
+        return "N/A"
+
+    print(f"::debug::Module={group_name}, old_version={old_module.version if old_module else 'None'}, "
+          f"old_hash={old_module.hash if old_module else 'None'}, "
+          f"new_version={new_module.version}, "
+          f"new_hash={new_module.hash}, "
+          f"repo={new_module.repo}")
+
+    # Get identifiers and links
+    old_identifier, old_link = get_identifier_and_link(old_module)
+
+    # Check if hash changed
+    hash_changed = old_module is None or old_module.hash != new_module.hash
+
+    # Determine new identifier only if hash changed
+    new_identifier, new_link = (None, "") if not hash_changed else get_identifier_and_link(new_module)
+
+    # Format output
+    if hash_changed:
+        # Hash changed - show old -> new, falling back to new-only when no old entry exists
+        if new_module.repo and old_module and old_link and new_link and old_module.hash and new_identifier:
+            return f"[{old_identifier}]({old_link}) → [{new_identifier}]({new_link}) ([diff]({new_module.repo}/compare/{old_module.hash}...{new_module.hash}))"
+        elif old_identifier and new_link and new_identifier:
+            return f"{old_identifier} → [{new_identifier}]({new_link})"
+        elif old_identifier and new_identifier:
+            return f"{old_identifier} → {new_identifier}"
+        elif new_link and new_identifier:
+            return f"[{new_identifier}]({new_link})"
+        elif new_identifier:
+            return new_identifier
+        else:
+            return "N/A"
+    elif old_identifier:
+        # Hash not changed - show only old
+        if old_link:
+            return f"[{old_identifier}]({old_link})"
+        else:
+            return old_identifier
+    else:
+        return "N/A"
+
+
+def main():
+    """Main entry point."""
+    parser = argparse.ArgumentParser(
+        description='Integration build script for SCORE modules',
+        formatter_class=argparse.RawDescriptionHelpFormatter
+    )
+    parser.add_argument(
+        '--known-good',
+        type=Path,
+        default=None,
+        help='Path to known_good.json file (default: known_good.json in repo root)'
+    )
+    parser.add_argument(
+        '--build-config',
+        type=Path,
+        default=None,
+        help='Path to build_config.json file (default: build_config.json in repo root)'
+    )
+    parser.add_argument(
+        '--config',
+        default=os.environ.get('CONFIG', 'bl-x86_64-linux'),
+        help='Bazel config to use (default: bl-x86_64-linux, or from CONFIG env 
var)' + ) + + args = parser.parse_args() + + # Configuration + config = args.config + log_dir = Path(os.environ.get('LOG_DIR', '_logs/logs')) + summary_file = Path(os.environ.get('SUMMARY_FILE', '_logs/build_summary.md')) + + known_good_file = args.known_good + if not known_good_file: + known_good_file = repo_root / 'known_good.json' + + build_config_file = args.build_config + if not build_config_file: + build_config_file = repo_root / 'build_config.json' + + # Load build configuration + BUILD_TARGET_GROUPS = load_build_config(build_config_file) + + # Create log directory + log_dir.mkdir(parents=True, exist_ok=True) + summary_file.parent.mkdir(parents=True, exist_ok=True) + + # Load modules from known_good files + old_modules = load_known_good(Path('known_good.json')).modules if Path('known_good.json').exists() else {} + new_modules = load_known_good(known_good_file).modules if known_good_file else {} + + # Start summary + timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + with open(summary_file, 'w') as f: + f.write(f"=== Integration Build Started {timestamp} ===\n") + f.write(f"Config: {config}\n") + if known_good_file: + f.write(f"Known Good File: {known_good_file}\n") + f.write("\n") + f.write("## Build Groups Summary\n") + f.write("\n") + f.write("| Group | Status | Duration (s) | Warnings | Deprecated refs | Commit/Version |\n") + f.write("|-------|--------|--------------|----------|-----------------|----------------|\n") + + print(f"=== Integration Build Started {timestamp} ===") + print(f"Config: {config}") + if known_good_file: + print(f"Known Good File: {known_good_file}") + + overall_warn_total = 0 + overall_depr_total = 0 + any_failed = False + + # Build each group + for group_name, module_config in BUILD_TARGET_GROUPS.items(): + log_file = log_dir / f"{group_name}.log" + + exit_code, duration = build_group(group_name, module_config.build_targets, config, log_file) + + if exit_code != 0: + any_failed = True + + # Count warnings and deprecated + warn_count = count_pattern(log_file, 'warning:') + depr_count = count_pattern(log_file, 'deprecated') + overall_warn_total += warn_count + overall_depr_total += depr_count + + # Format status + status_symbol = "✅" if exit_code == 0 else f"❌({exit_code})" + + # Format commit/version cell + commit_version_cell = format_commit_version_cell(group_name, old_modules, new_modules) + + # Append row to summary + row = f"| {group_name} | {status_symbol} | {duration} | {warn_count} | {depr_count} | {commit_version_cell} |\n" + with open(summary_file, 'a') as f: + f.write(row) + print(row.strip()) + + # Append totals + with open(summary_file, 'a') as f: + f.write(f"| TOTAL | | | {overall_warn_total} | {overall_depr_total} | |\n") + + # Print summary + print('::group::Build Summary') + print('=== Build Summary ===') + with open(summary_file, 'r') as f: + for line in f: + print(line, end='') + print('::endgroup::') + + # Exit with error if any build failed + if any_failed: + print("::error::One or more build groups failed. See summary above.") + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/scripts/integration_test.sh b/scripts/integration_test.sh deleted file mode 100755 index 20947b1d30..0000000000 --- a/scripts/integration_test.sh +++ /dev/null @@ -1,356 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Integration build script. -# Captures warning counts for regression tracking. 
-# -# Usage: ./integration_test.sh [--known-good ] -# --known-good: Optional path to known_good.json file - -CONFIG=${CONFIG:-bl-x86_64-linux} -LOG_DIR=${LOG_DIR:-_logs/logs} -SUMMARY_FILE=${SUMMARY_FILE:-_logs/build_summary.md} -KNOWN_GOOD_FILE="" -script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -# Go up one level to get the repository root since this script is in scripts/ directory -repo_root="$(cd "${script_dir}/.." && pwd)" - -# Set default known_good.json if it exists -if [[ -z "${KNOWN_GOOD_FILE}" ]] && [[ -f "known_good.json" ]]; then - KNOWN_GOOD_FILE="known_good.json" -fi - -# maybe move this to known_good.json or a config file later -declare -A BUILD_TARGET_GROUPS=( - [score_baselibs]="@score_baselibs//score/..." - [score_communication]="@score_communication//score/mw/com:com" - [score_persistency]="@score_persistency//src/cpp/src/... @score_persistency//src/rust/..." - [score_kyron]="@score_kyron//src/..." - [score_orchestrator]="@score_orchestrator//src/..." - [score_test_scenarios]="@score_test_scenarios//test_scenarios_rust:test_scenarios_rust @score_test_scenarios//test_scenarios_cpp:test_scenarios_cpp" - [score_feo]="-- @score_feo//... -@score_feo//:docs -@score_feo//:ide_support -@score_feo//:needs_json" - [score_logging]="@score_logging//score/... \ - --@score_baselibs//score/memory/shared/flags:use_typedshmd=False \ - --@score_baselibs//score/json:base_library=nlohmann \ - --@score_logging//score/datarouter/build_configuration_flags:persistent_logging=False \ - --@score_logging//score/datarouter/build_configuration_flags:persistent_config_feature_enabled=False \ - --@score_logging//score/datarouter/build_configuration_flags:enable_nonverbose_dlt=False \ - --@score_logging//score/datarouter/build_configuration_flags:enable_dynamic_configuration_in_datarouter=False \ - --@score_logging//score/datarouter/build_configuration_flags:dlt_file_transfer_feature=False \ - --@score_logging//score/datarouter/build_configuration_flags:use_local_vlan=True " -) - -# Parse command line arguments -while [[ $# -gt 0 ]]; do - case $1 in - --known-good) - KNOWN_GOOD_FILE="$2" - shift 2 - ;; - *) - echo "Unknown option: $1" - echo "Usage: $0 [--known-good ]" - exit 1 - ;; - esac -done - -mkdir -p "${LOG_DIR}" || true - -# Function to extract commit hash from known_good.json -get_commit_hash() { - local module_name=$1 - local known_good_file=$2 - - if [[ -z "${known_good_file}" ]] || [[ ! -f "${known_good_file}" ]]; then - echo "N/A" - return - fi - - # Use the Python script to extract module info - local result - result=$(python3 "${repo_root}/tools/get_module_info.py" "${known_good_file}" "${module_name}" "hash" 2>&1) - if [[ $? -eq 0 ]] && [[ -n "${result}" ]] && [[ "${result}" != "N/A" ]]; then - echo "${result}" - else - echo "N/A" - fi -} - -# Function to extract repo URL from known_good.json -get_module_repo() { - local module_name=$1 - local known_good_file=$2 - - if [[ -z "${known_good_file}" ]] || [[ ! -f "${known_good_file}" ]]; then - echo "N/A" - return - fi - - # Use the Python script to extract module repo - local result - result=$(python3 "${repo_root}/tools/get_module_info.py" "${known_good_file}" "${module_name}" "repo" 2>&1) - if [[ $? -eq 0 ]] && [[ -n "${result}" ]] && [[ "${result}" != "N/A" ]]; then - echo "${result}" - else - echo "N/A" - fi -} - -# Function to extract version from known_good.json -get_module_version() { - local module_name=$1 - local known_good_file=$2 - - if [[ -z "${known_good_file}" ]] || [[ ! 
-f "${known_good_file}" ]]; then - echo "N/A" - return - fi - - # Use the Python script to extract module version - local result - result=$(python3 "${repo_root}/tools/get_module_info.py" "${known_good_file}" "${module_name}" "version" 2>&1) - if [[ $? -eq 0 ]] && [[ -n "${result}" ]] && [[ "${result}" != "N/A" ]]; then - echo "${result}" - else - echo "N/A" - fi -} - -get_module_version_gh() { - local module_name=$1 - local known_good_file=$2 - local repo_url=$3 - local commit_hash=$4 - - if [[ -z "${known_good_file}" ]] || [[ ! -f "${known_good_file}" ]]; then - echo "::warning::get_module_version_gh: known_good_file not found or empty" >&2 - echo "N/A" - return - fi - - # Check if gh CLI is installed - if ! command -v gh &> /dev/null; then - echo "::warning::gh CLI not found. Install it to resolve commit hashes to tags." >&2 - echo "N/A" - return - fi - - echo "::debug::get_module_version_gh: module=${module_name}, repo=${repo_url}, hash=${commit_hash}" >&2 - - # Extract owner/repo from GitHub URL - if [[ "${repo_url}" =~ github\.com[/:]([^/]+)/([^/.]+)(\.git)?$ ]]; then - local owner="${BASH_REMATCH[1]}" - local repo="${BASH_REMATCH[2]}" - - echo "::debug::Querying GitHub API: repos/${owner}/${repo}/tags for commit ${commit_hash}" >&2 - - # Query GitHub API for tags and find matching commit - local tag_name - tag_name=$(gh api "repos/${owner}/${repo}/tags" --jq ".[] | select(.commit.sha == \"${commit_hash}\") | .name" 2>/dev/null | head -n1) - - if [[ -n "${tag_name}" ]]; then - echo "::debug::Found tag: ${tag_name}" >&2 - echo "${tag_name}" - else - echo "::debug::No tag found for commit ${commit_hash}" >&2 - echo "N/A" - fi - else - echo "::warning::Invalid repo URL format: ${repo_url}" >&2 - echo "N/A" - fi -} - -# Helper function to truncate hash -truncate_hash() { - local hash=$1 - if [[ ${#hash} -gt 8 ]]; then - echo "${hash:0:8}" - else - echo "${hash}" - fi -} - -warn_count() { - # Grep typical compiler and Bazel warnings; adjust patterns as needed. - local file=$1 - # Count lines with 'warning:' excluding ones from system headers optionally later. 
- grep -i 'warning:' "$file" | wc -l || true -} - -depr_count() { - local file=$1 - grep -i 'deprecated' "$file" | wc -l || true -} - -timestamp() { date '+%Y-%m-%d %H:%M:%S'; } - -echo "=== Integration Build Started $(timestamp) ===" | tee "${SUMMARY_FILE}" -echo "Config: ${CONFIG}" | tee -a "${SUMMARY_FILE}" -if [[ -n "${KNOWN_GOOD_FILE}" ]]; then - echo "Known Good File: ${KNOWN_GOOD_FILE}" | tee -a "${SUMMARY_FILE}" -fi -echo "" >> "${SUMMARY_FILE}" -echo "## Build Groups Summary" >> "${SUMMARY_FILE}" -echo "" >> "${SUMMARY_FILE}" -# Markdown table header -{ - echo "| Group | Status | Duration (s) | Warnings | Deprecated refs | Commit/Version |"; - echo "|-------|--------|--------------|----------|-----------------|----------------|"; -} >> "${SUMMARY_FILE}" - -overall_warn_total=0 -overall_depr_total=0 - -# Track if any build group failed -any_failed=0 - -for group in "${!BUILD_TARGET_GROUPS[@]}"; do - targets="${BUILD_TARGET_GROUPS[$group]}" - log_file="${LOG_DIR}/${group}.log" - - # Log build group banner only to stdout/stderr (not into summary table file) - echo "--- Building group: ${group} ---" - start_ts=$(date +%s) - echo "bazel build --verbose_failures --config "${CONFIG}" ${targets}" - # GitHub Actions log grouping start - echo "::group::Bazel build (${group})" - set +e - bazel build --verbose_failures --config "${CONFIG}" ${targets} 2>&1 | tee "$log_file" - build_status=${PIPESTATUS[0]} - # Track if any build group failed - if [[ ${build_status} -ne 0 ]]; then - any_failed=1 - fi - set -e - echo "::endgroup::" # End Bazel build group - end_ts=$(date +%s) - duration=$(( end_ts - start_ts )) - w_count=$(warn_count "$log_file") - d_count=$(depr_count "$log_file") - overall_warn_total=$(( overall_warn_total + w_count )) - overall_depr_total=$(( overall_depr_total + d_count )) - # Append as a markdown table row (duration without trailing 's') - if [[ ${build_status} -eq 0 ]]; then - status_symbol="✅" - else - status_symbol="❌(${build_status})" - fi - - # Get commit hash/version for this group (group name is the module name) - commit_hash=$(get_commit_hash "${group}" "${KNOWN_GOOD_FILE}") - commit_hash_old=$(get_commit_hash "${group}" "known_good.json") - version=$(get_module_version "${group}" "${KNOWN_GOOD_FILE}") - repo=$(get_module_repo "${group}" "${KNOWN_GOOD_FILE}") - - # Debug output - echo "::debug::Module=${group}, version=${version}, hash=${commit_hash}, hash_old=${commit_hash_old}, repo=${repo}" >&2 - - # Determine what to display and link to - # Step 1: Determine old version/hash identifier - old_identifier="N/A" - old_link="" - if [[ "${commit_hash_old}" != "N/A" ]]; then - echo "::debug::Step 1: Getting old version for ${group}" >&2 - version_old=$(get_module_version "${group}" "known_good.json") - echo "::debug::version_old from JSON: ${version_old}" >&2 - if [[ "${version_old}" == "N/A" ]]; then - # Try to get version from GitHub API - echo "::debug::Trying to resolve version_old from GitHub for ${group}" >&2 - version_old=$(get_module_version_gh "${group}" "known_good.json" "${repo}" "${commit_hash_old}") - echo "::debug::version_old from GitHub: ${version_old}" >&2 - fi - - # Prefer version over hash - if [[ "${version_old}" != "N/A" ]]; then - old_identifier="${version_old}" - if [[ "${repo}" != "N/A" ]]; then - old_link="${repo}/releases/tag/${version_old}" - fi - else - old_identifier=$(truncate_hash "${commit_hash_old}") - if [[ "${repo}" != "N/A" ]]; then - old_link="${repo}/tree/${commit_hash_old}" - fi - fi - echo 
"::debug::old_identifier=${old_identifier}" >&2 - fi - - # Step 2: Determine if hash changed - hash_changed=0 - if [[ "${commit_hash_old}" != "N/A" ]] && [[ "${commit_hash}" != "N/A" ]] && [[ "${commit_hash}" != "${commit_hash_old}" ]]; then - hash_changed=1 - fi - echo "::debug::hash_changed=${hash_changed}" >&2 - - # Step 3: Determine new version/hash identifier (only if hash changed) - new_identifier="N/A" - new_link="" - if [[ ${hash_changed} -eq 1 ]] && [[ "${commit_hash}" != "N/A" ]]; then - echo "::debug::Step 3: Hash changed, getting new version for ${group}" >&2 - # Try to get version from known_good file first, then GitHub API - if [[ "${version}" == "N/A" ]]; then - echo "::debug::Trying to resolve new version from GitHub for ${group}" >&2 - version=$(get_module_version_gh "${group}" "${KNOWN_GOOD_FILE}" "${repo}" "${commit_hash}") - echo "::debug::new version from GitHub: ${version}" >&2 - fi - - # Prefer version over hash - if [[ "${version}" != "N/A" ]]; then - new_identifier="${version}" - if [[ "${repo}" != "N/A" ]]; then - new_link="${repo}/releases/tag/${version}" - fi - else - new_identifier=$(truncate_hash "${commit_hash}") - if [[ "${repo}" != "N/A" ]]; then - new_link="${repo}/tree/${commit_hash}" - fi - fi - echo "::debug::new_identifier=${new_identifier}" >&2 - fi - - # Step 4: Format output based on whether hash changed - echo "::debug::Formatting output: hash_changed=${hash_changed}, old=${old_identifier}, new=${new_identifier}" >&2 - if [[ ${hash_changed} -eq 1 ]]; then - # Hash changed - show old -> new - if [[ "${repo}" != "N/A" ]] && [[ -n "${old_link}" ]] && [[ -n "${new_link}" ]]; then - commit_version_cell="[${old_identifier}](${old_link}) → [${new_identifier}](${new_link}) ([diff](${repo}/compare/${commit_hash_old}...${commit_hash}))" - else - commit_version_cell="${old_identifier} → ${new_identifier}" - fi - elif [[ "${old_identifier}" != "N/A" ]]; then - # Hash not changed - show only old - if [[ "${repo}" != "N/A" ]] && [[ -n "${old_link}" ]]; then - commit_version_cell="[${old_identifier}](${old_link})" - else - commit_version_cell="${old_identifier}" - fi - elif [[ "${new_identifier}" != "N/A" ]]; then - # No old available - show new - if [[ "${repo}" != "N/A" ]] && [[ -n "${new_link}" ]]; then - commit_version_cell="[${new_identifier}](${new_link})" - else - commit_version_cell="${new_identifier}" - fi - else - # Nothing available - commit_version_cell="N/A" - fi - - echo "| ${group} | ${status_symbol} | ${duration} | ${w_count} | ${d_count} | ${commit_version_cell} |" | tee -a "${SUMMARY_FILE}" -done - -# Append aggregate totals row to summary table -echo "| TOTAL | | | ${overall_warn_total} | ${overall_depr_total} | |" >> "${SUMMARY_FILE}" -echo '::group::Build Summary' -echo '=== Build Summary (echo) ===' -cat "${SUMMARY_FILE}" || echo "(Could not read summary file ${SUMMARY_FILE})" -echo '::endgroup::' - -# Report to GitHub Actions if any build group failed -if [[ ${any_failed} -eq 1 ]]; then - echo "::error::One or more build groups failed. See summary above." 
- exit 1 -fi diff --git a/scripts/known_good/__init__.py b/scripts/known_good/__init__.py new file mode 100644 index 0000000000..eac8e9d3f5 --- /dev/null +++ b/scripts/known_good/__init__.py @@ -0,0 +1 @@ +# Known good utilities package diff --git a/tools/known_good_to_workspace_metadata.py b/scripts/known_good/known_good_to_workspace_metadata.py similarity index 85% rename from tools/known_good_to_workspace_metadata.py rename to scripts/known_good/known_good_to_workspace_metadata.py index dd9e3ad8b8..f77b738cbb 100644 --- a/tools/known_good_to_workspace_metadata.py +++ b/scripts/known_good/known_good_to_workspace_metadata.py @@ -1,9 +1,9 @@ #!/usr/bin/env python3 import argparse -import json import csv +from pathlib import Path -from models import Module +from models.known_good import load_known_good MODULES_CSV_HEADER = [ "repo_url", @@ -21,13 +21,9 @@ def main(): parser.add_argument("--gita-workspace", dest="gita_workspace", default=".gita-workspace.csv", help="File to output gita workspace metadata") args = parser.parse_args() - with open(args.known_good, "r") as f: - data = json.load(f) - - modules_dict = data.get("modules", {}) - - # Parse modules using Module dataclass - modules = Module.parse_modules(modules_dict) + # Load known_good using KnownGood dataclass + known_good = load_known_good(Path(args.known_good)) + modules = list(known_good.modules.values()) gita_metadata = [] for module in modules: diff --git a/tools/models/__init__.py b/scripts/known_good/models/__init__.py similarity index 100% rename from tools/models/__init__.py rename to scripts/known_good/models/__init__.py diff --git a/scripts/known_good/models/known_good.py b/scripts/known_good/models/known_good.py new file mode 100644 index 0000000000..70fedc3e0a --- /dev/null +++ b/scripts/known_good/models/known_good.py @@ -0,0 +1,89 @@ +"""KnownGood dataclass for score reference integration.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any, Dict +from pathlib import Path +import json +import datetime as dt + +from .module import Module + + +@dataclass +class KnownGood: + """Known good configuration with modules and metadata.""" + modules: Dict[str, Module] + timestamp: str + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> KnownGood: + """Create a KnownGood instance from a dictionary. + + Args: + data: Dictionary containing known_good.json data + + Returns: + KnownGood instance + """ + modules_dict = data.get('modules', {}) + modules_list = Module.parse_modules(modules_dict) + modules = {m.name: m for m in modules_list} + timestamp = data.get('timestamp', '') + + return cls(modules=modules, timestamp=timestamp) + + def to_dict(self) -> Dict[str, Any]: + """Convert KnownGood instance to dictionary for JSON output. + + Returns: + Dictionary with known_good configuration + """ + return { + "modules": {name: module.to_dict() for name, module in self.modules.items()}, + "timestamp": self.timestamp + } + + def write(self, output_path: Path, dry_run: bool = False) -> None: + """Write known_good data to file or print for dry-run. 
+ + Args: + output_path: Path to output file + dry_run: If True, print instead of writing + """ + + # Update timestamp before writing + self.timestamp = dt.datetime.now(dt.timezone.utc).replace(microsecond=0).isoformat() + "Z" + + output_json = json.dumps(self.to_dict(), indent=4, sort_keys=False) + "\n" + + if dry_run: + print(f"\nDry run: would write to {output_path}\n") + print("---- BEGIN UPDATED JSON ----") + print(output_json, end="") + print("---- END UPDATED JSON ----") + else: + with open(output_path, "w", encoding="utf-8") as f: + f.write(output_json) + print(f"Successfully wrote updated known_good.json to {output_path}") + + +def load_known_good(path: Path) -> KnownGood: + """Load and parse the known_good.json file. + + Args: + path: Path to known_good.json file + + Returns: + KnownGood instance with parsed modules + """ + with open(path, "r", encoding="utf-8") as f: + data = json.load(f) + + if not isinstance(data, dict) or not isinstance(data.get("modules"), dict): + raise SystemExit( + f"Invalid known_good.json at {path} (expected object with 'modules' dict)" + ) + + return KnownGood.from_dict(data) diff --git a/tools/models/module.py b/scripts/known_good/models/module.py similarity index 99% rename from tools/models/module.py rename to scripts/known_good/models/module.py index 3db3354e51..259f4f2861 100644 --- a/tools/models/module.py +++ b/scripts/known_good/models/module.py @@ -97,7 +97,7 @@ def to_dict(self) -> Dict[str, Any]: Returns: Dictionary with module configuration """ - result = { + result: Dict[str, Any] = { "repo": self.repo, "hash": self.hash } diff --git a/tools/override_known_good_repo.py b/scripts/known_good/override_known_good_repo.py similarity index 79% rename from tools/override_known_good_repo.py rename to scripts/known_good/override_known_good_repo.py index d892d58321..ecb573fe40 100755 --- a/tools/override_known_good_repo.py +++ b/scripts/known_good/override_known_good_repo.py @@ -13,31 +13,20 @@ update_module_from_known_good.py to generate the MODULE.bazel file. """ import argparse -import json import os import re import datetime as dt -from typing import Dict, Any, List +from pathlib import Path +from typing import Dict, List import logging from models import Module +from models.known_good import KnownGood, load_known_good # Configure logging logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') -def load_known_good(path: str) -> Dict[str, Any]: - """Load and parse the known_good.json file.""" - with open(path, "r", encoding="utf-8") as f: - data = json.load(f) - - if isinstance(data, dict) and isinstance(data.get("modules"), dict): - return data - raise SystemExit( - f"Invalid known_good.json at {path} (expected object with 'modules' dict)" - ) - - def parse_and_apply_overrides(modules: Dict[str, Module], repo_overrides: List[str]) -> int: """ Parse repo override arguments and apply them to modules. 
@@ -139,44 +128,28 @@ def parse_and_apply_overrides(modules: Dict[str, Module], repo_overrides: List[s return overrides_applied -def apply_overrides(data: Dict[str, Any], repo_overrides: List[str]) -> Dict[str, Any]: - """Apply repository commit overrides to the known_good data.""" - modules_dict = data.get("modules", {}) - - # Parse modules into Module instances (skip validation since we're just overriding) - modules_list = [Module.from_dict(name, mod_data) for name, mod_data in modules_dict.items()] - modules = {m.name: m for m in modules_list} +def apply_overrides(known_good: KnownGood, repo_overrides: List[str]) -> KnownGood: + """Apply repository commit overrides to the known_good data. + Args: + known_good: KnownGood instance to modify + repo_overrides: List of override strings + + Returns: + Updated KnownGood instance + """ # Parse and apply overrides - overrides_applied = parse_and_apply_overrides(modules, repo_overrides) + overrides_applied = parse_and_apply_overrides(known_good.modules, repo_overrides) if overrides_applied == 0: logging.warning("No overrides were applied to any modules") else: logging.info(f"Successfully applied {overrides_applied} override(s)") - # Convert modules back to dict format - data["modules"] = {name: module.to_dict() for name, module in modules.items()} - # Update timestamp - data["timestamp"] = dt.datetime.now(dt.timezone.utc).replace(microsecond=0).isoformat() + "Z" + known_good.timestamp = dt.datetime.now(dt.timezone.utc).replace(microsecond=0).isoformat() + "Z" - return data - - -def write_known_good(data: Dict[str, Any], output_path: str, dry_run: bool = False) -> None: - """Write known_good data to file or print for dry-run.""" - output_json = json.dumps(data, indent=4, sort_keys=False) + "\n" - - if dry_run: - print(f"\nDry run: would write to {output_path}\n") - print("---- BEGIN UPDATED JSON ----") - print(output_json, end="") - print("---- END UPDATED JSON ----") - else: - with open(output_path, "w", encoding="utf-8") as f: - f.write(output_json) - logging.info(f"Successfully wrote updated known_good.json to {output_path}") + return known_good def main() -> None: @@ -250,15 +223,15 @@ def main() -> None: # Load, update, and output logging.info(f"Loading {known_path}") - data = load_known_good(known_path) + known_good = load_known_good(known_path) if not args.module_overrides: parser.error("at least one --module-override is required") overrides = args.module_overrides - updated_data = apply_overrides(data, overrides) - write_known_good(updated_data, output_path, args.dry_run) + updated_known_good = apply_overrides(known_good, overrides) + updated_known_good.write(Path(output_path), args.dry_run) if __name__ == "__main__": diff --git a/tools/requirements.txt b/scripts/known_good/requirements.txt similarity index 100% rename from tools/requirements.txt rename to scripts/known_good/requirements.txt diff --git a/tools/update_module_from_known_good.py b/scripts/known_good/update_module_from_known_good.py similarity index 85% rename from tools/update_module_from_known_good.py rename to scripts/known_good/update_module_from_known_good.py index a2c6c720eb..49101e354e 100755 --- a/tools/update_module_from_known_good.py +++ b/scripts/known_good/update_module_from_known_good.py @@ -4,42 +4,29 @@ with `bazel_dep` and `git_override` calls for each module in the JSON. 
Usage: - python3 tools/update_module_from_known_good.py \ + python3 scripts/known_good/update_module_from_known_good.py \ --known known_good.json \ --output score_modules.MODULE.bazel The generated score_modules.MODULE.bazel file is included by MODULE.bazel. Note: To override repository commits before generating the MODULE.bazel file, -use tools/override_known_good_repo.py first to create an updated known_good.json. +use scripts/known_good/override_known_good_repo.py first to create an updated known_good.json. """ import argparse -import json import os import re -from datetime import datetime import logging -from typing import Dict, List, Any, Optional +from typing import Dict, List, Optional +from pathlib import Path from models import Module +from models.known_good import load_known_good # Configure logging logging.basicConfig(level=logging.WARNING, format='%(levelname)s: %(message)s') -def load_known_good(path: str) -> Dict[str, Any]: - """Load and parse the known_good.json file.""" - with open(path, "r", encoding="utf-8") as f: - data = json.load(f) - - # Expect a single JSON object containing a "modules" dict/object - if isinstance(data, dict) and isinstance(data.get("modules"), dict): - return data - raise SystemExit( - f"Invalid known_good.json at {path} (expected object with 'modules' dict)" - ) - - def generate_git_override_blocks(modules: List[Module], repo_commit_dict: Dict[str, str]) -> List[str]: """Generate bazel_dep and git_override blocks for each module.""" blocks = [] @@ -134,7 +121,7 @@ def generate_file_content(args: argparse.Namespace, modules: List[Module], repo_ if timestamp: header += ( f"# Generated from known_good.json at {timestamp}\n" - "# Do not edit manually - use tools/update_module_from_known_good.py\n" + "# Do not edit manually - use scripts/known_good/update_module_from_known_good.py\n" "\n" ) @@ -161,17 +148,17 @@ def main() -> None: epilog=""" Examples: # Generate MODULE.bazel from known_good.json - python3 tools/update_module_from_known_good.py + python3 scripts/known_good/update_module_from_known_good.py # Use a custom input and output file - python3 tools/update_module_from_known_good.py \\ + python3 scripts/known_good/update_module_from_known_good.py \\ --known custom_known_good.json \\ --output custom_modules.MODULE.bazel # Preview without writing - python3 tools/update_module_from_known_good.py --dry-run + python3 scripts/known_good/update_module_from_known_good.py --dry-run -Note: To override repository commits, use tools/override_known_good_repo.py first. +Note: To override repository commits, use scripts/known_good/override_known_good_repo.py first. 
""" ) parser.add_argument( @@ -231,19 +218,15 @@ def main() -> None: repo_commit_dict[repo_url] = commit_hash # Load known_good.json - data = load_known_good(known_path) - modules_dict = data.get("modules") or {} - if not modules_dict: + known_good = load_known_good(Path(known_path)) + if not known_good.modules: raise SystemExit("No modules found in known_good.json") - # Parse modules into Module dataclass instances - modules = Module.parse_modules(modules_dict) - if not modules: - raise SystemExit("No valid modules to process") + # Get modules list + modules = list(known_good.modules.values()) # Generate file content - timestamp = data.get("timestamp") or datetime.now().isoformat() - content = generate_file_content(args, modules, repo_commit_dict, timestamp) + content = generate_file_content(args, modules, repo_commit_dict, known_good.timestamp) if args.dry_run: print(f"Dry run: would write to {output_path}\n") diff --git a/tools/update_module_latest.py b/scripts/known_good/update_module_latest.py similarity index 71% rename from tools/update_module_latest.py rename to scripts/known_good/update_module_latest.py index 8884485e3a..3a93f639a0 100755 --- a/tools/update_module_latest.py +++ b/scripts/known_good/update_module_latest.py @@ -27,12 +27,12 @@ import argparse import shutil import subprocess -import datetime as dt import json import os import sys +from pathlib import Path -from models import Module +from models.known_good import load_known_good try: from github import Github, GithubException @@ -83,29 +83,6 @@ def fetch_latest_commit_gh(owner_repo: str, branch: str) -> str: return sha -def load_known_good(path: str) -> dict: - with open(path, "r", encoding="utf-8") as f: - return json.load(f) - - -def write_known_good(path: str, original: dict, modules: list[Module]) -> None: - out = dict(original) # shallow copy - out["timestamp"] = dt.datetime.now(dt.timezone.utc).replace(microsecond=0).isoformat() + "Z" - out["modules"] = {} - for m in modules: - mod_dict = {"repo": m.repo, "hash": m.hash} - if m.version: - mod_dict["version"] = m.version - if m.patches: - mod_dict["patches"] = m.patches - if m.branch: - mod_dict["branch"] = m.branch - out["modules"][m.name] = mod_dict - with open(path, "w", encoding="utf-8") as f: - json.dump(out, f, indent=4, sort_keys=False) - f.write("\n") - - def parse_args(argv: list[str]) -> argparse.Namespace: p = argparse.ArgumentParser(description="Update module hashes to latest commit on branch") p.add_argument( @@ -123,43 +100,20 @@ def parse_args(argv: list[str]) -> argparse.Namespace: def main(argv: list[str]) -> int: args = parse_args(argv) try: - data = load_known_good(args.known_good) - except OSError as e: - print(f"ERROR: Cannot read known_good file: {e}", file=sys.stderr) + known_good = load_known_good(Path(args.known_good)) + except (OSError, SystemExit) as e: + print(f"ERROR: Cannot read or parse known_good file: {e}", file=sys.stderr) return 3 except json.JSONDecodeError as e: - print(f"ERROR: Invalid JSON: {e}", file=sys.stderr) + print(f"ERROR: Invalid JSON syntax: {e}", file=sys.stderr) return 3 - modules_raw = data.get("modules", {}) - modules: list[Module] = [] - for name, m in modules_raw.items(): - try: - version = m.get("version") - hash_val = m.get("hash", "") - patches = m.get("patches") - repo = m.get("repo") - branch = m.get("branch") - if not repo: - print(f"WARNING: skipping module {name} with missing repo", file=sys.stderr) - continue - modules.append(Module( - name=name, - hash=hash_val, - repo=repo, - version=version, - 
patches=patches,
-                branch=branch
-            ))
-        except KeyError as e:
-            print(f"WARNING: skipping module {name} missing key {e}: {m}", file=sys.stderr)
-    if not modules:
+    if not known_good.modules:
         print("ERROR: No modules found to update.", file=sys.stderr)
         return 3
 
     token = os.environ.get("GITHUB_TOKEN")
     failures = 0
-    updated: list[Module] = []
 
     # Default: use gh if available unless --no-gh specified
     use_gh = (not args.no_gh) and shutil.which("gh") is not None
@@ -174,7 +128,7 @@ def main(argv: list[str]) -> int:
     if args.no_gh and shutil.which("gh") is not None:
         print("INFO: --no-gh specified; ignoring installed 'gh' CLI", file=sys.stderr)
 
-    for mod in modules:
+    for mod in known_good.modules.values():
         try:
             # Use module-specific branch if available, otherwise use command-line branch
             branch = mod.branch if mod.branch else args.branch
@@ -184,25 +138,25 @@
             latest = fetch_latest_commit(mod.owner_repo, branch, token)
 
             # Only reuse version if hash did not change
-            version_to_use = mod.version if latest == mod.hash else None
-            updated.append(Module(name=mod.name, hash=latest, repo=mod.repo, version=version_to_use, patches=mod.patches, branch=mod.branch))
+            old_hash = mod.hash  # Capture before overwriting, for display below
+            if latest != mod.hash:
+                mod.hash = latest
+                mod.version = None  # Clear version when hash changes
 
             # Display format: if version exists, show "version -> hash", otherwise "hash -> hash"
             if mod.version:
                 print(f"{mod.name}: {mod.version} -> {latest[:8]} (branch {branch})")
             else:
-                print(f"{mod.name}: {mod.hash[:8]} -> {latest[:8]} (branch {branch})")
+                print(f"{mod.name}: {old_hash[:8]} -> {latest[:8]} (branch {branch})")
         except Exception as e:  # noqa: BLE001
             failures += 1
             print(f"ERROR {mod.name}: {e}", file=sys.stderr)
             if args.fail_fast:
                 break
-            # Preserve old hash if continuing
-            updated.append(mod)
 
-    if args.output and updated:
+    if args.output:
         try:
-            write_known_good(args.output, data, updated)
+            known_good.write(Path(args.output))
             print(f"Updated JSON written to {args.output}")
         except OSError as e:
             print(f"ERROR: Failed writing output file: {e}", file=sys.stderr)
diff --git a/scripts/models/__init__.py b/scripts/models/__init__.py
new file mode 100644
index 0000000000..f3d9f4b1ed
--- /dev/null
+++ b/scripts/models/__init__.py
@@ -0,0 +1 @@
+# Models package
diff --git a/scripts/models/build_config.py b/scripts/models/build_config.py
new file mode 100644
index 0000000000..b7975f6be8
--- /dev/null
+++ b/scripts/models/build_config.py
@@ -0,0 +1,40 @@
+"""Build configuration management for SCORE modules."""
+
+import json
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Dict, Optional
+
+
+@dataclass
+class BuildModuleConfig:
+    """Configuration for a build module."""
+    name: str
+    build_targets: str
+    test_targets: Optional[str] = None
+
+
+def load_build_config(config_path: Path) -> Dict[str, BuildModuleConfig]:
+    """Load build configuration from JSON file.
+ + Args: + config_path: Path to build_config.json file + + Returns: + Dictionary mapping module names to BuildModuleConfig instances + """ + if not config_path.exists(): + raise FileNotFoundError(f"Build config file not found: {config_path}") + + with open(config_path, 'r') as f: + data = json.load(f) + + modules = data.get('modules', {}) + return { + name: BuildModuleConfig( + name=name, + build_targets=module_data.get('build_targets', ''), + test_targets=module_data.get('test_targets') + ) + for name, module_data in modules.items() + } diff --git a/tools/get_module_info.py b/tools/get_module_info.py deleted file mode 100755 index 45286d9292..0000000000 --- a/tools/get_module_info.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 -"""Extract module information from known_good.json.""" - -import json -import sys -from typing import Optional - -from models import Module - - -def load_module(known_good_file: str, module_name: str) -> Optional[Module]: - """ - Load module from known_good.json. - - Args: - known_good_file: Path to the known_good.json file - module_name: Name of the module to look up - - Returns: - Module instance, or None if not found - """ - try: - with open(known_good_file, 'r') as f: - data = json.load(f) - modules_dict = data.get('modules', {}) - module_data = modules_dict.get(module_name) - - if not module_data: - return None - - return Module.from_dict(module_name, module_data) - except Exception as e: - # Log error to stderr for debugging - print(f"Error loading {known_good_file}: {e}", file=sys.stderr) - return None - - -def get_module_field(module: Optional[Module], field: str = 'hash') -> str: - """ - Extract a specific field from module. - - Args: - module: Module instance - field: Field to extract ('hash', 'version', 'repo', or 'all') - - Returns: - Requested field value, or 'N/A' if not found - For 'hash': returns the hash value - For 'all': returns hash/version (prefers hash, falls back to version) - """ - if not module: - return 'N/A' - - if field == 'repo': - repo = module.repo or 'N/A' - # Remove .git suffix if present - if repo.endswith('.git'): - repo = repo[:-4] - return repo - elif field == 'version': - return module.version or 'N/A' - elif field == 'hash': - return module.hash or 'N/A' - else: # field == 'all' or default - return module.hash or module.version or 'N/A' - - -if __name__ == '__main__': - if len(sys.argv) < 3 or len(sys.argv) > 4: - print('Usage: get_module_info.py [field]') - print(' field: hash (default), version, repo, or all') - print('N/A') - sys.exit(1) - - known_good_file = sys.argv[1] - module_name = sys.argv[2] - field = sys.argv[3] if len(sys.argv) == 4 else 'all' - - module = load_module(known_good_file, module_name) - result = get_module_field(module, field) - print(result)
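
The relocated helpers are plain Python modules, so the pinning data can be sanity-checked locally without going through CI. The snippet below is a minimal illustrative sketch, not part of the patch: the file name `check_pins.py` and its placement in the repository root are assumptions, and it simply mirrors the import style used by `scripts/integration_test.py` above to verify that every build group in `build_config.json` has a matching pin in `known_good.json`.

#!/usr/bin/env python3
"""check_pins.py (hypothetical helper): cross-check build_config.json vs known_good.json."""
import sys
from pathlib import Path

repo_root = Path(__file__).resolve().parent      # assumes this file sits in the repo root
sys.path.insert(0, str(repo_root / "scripts"))   # same import root the new scripts rely on

from models.build_config import load_build_config            # scripts/models/build_config.py
from known_good.models.known_good import load_known_good     # scripts/known_good/models/known_good.py

build_groups = load_build_config(repo_root / "build_config.json")
known_good = load_known_good(repo_root / "known_good.json")

# Build groups are keyed by module name, so each one should have a pinned module entry.
missing = [name for name in build_groups if name not in known_good.modules]
for name in missing:
    print(f"::warning::build group '{name}' has no pinned module in known_good.json")
sys.exit(1 if missing else 0)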