diff --git a/.github/release.yaml b/.github/release.yaml new file mode 100644 index 0000000000..9ef36aca6d --- /dev/null +++ b/.github/release.yaml @@ -0,0 +1,8 @@ +changelog: + categories: + - title: Features + labels: + - enhancement + - title: Bug fixes + labels: + - bug diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 34085c9225..cd60ef68a5 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -6,6 +6,7 @@ reviewers: [ 'team:data-platform-postgresql', ], + "baseBranches": ["main", "/^*\\/edge$/"], packageRules: [ { matchPackageNames: [ @@ -13,32 +14,7 @@ ], allowedVersions: '<2.0.0', }, - { - matchManagers: [ - 'custom.regex', - ], - matchDepNames: [ - 'juju', - ], - matchDatasources: [ - 'pypi', - ], - allowedVersions: '<3', - groupName: 'Juju agents', - }, ], customManagers: [ - { - customType: 'regex', - fileMatch: [ - '^\\.github/workflows/[^/]+\\.ya?ml$', - ], - matchStrings: [ - '(libjuju: )==(?.*?) +# renovate: latest libjuju 2', - ], - depNameTemplate: 'juju', - datasourceTemplate: 'pypi', - versioningTemplate: 'loose', - }, ], } diff --git a/.github/workflows/approve_renovate_pr.yaml b/.github/workflows/approve_renovate_pr.yaml new file mode 100644 index 0000000000..4449576ea3 --- /dev/null +++ b/.github/workflows/approve_renovate_pr.yaml @@ -0,0 +1,15 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. +name: Approve Renovate pull request + +on: + pull_request: + types: + - opened + +jobs: + approve-pr: + name: Approve Renovate pull request + uses: canonical/data-platform-workflows/.github/workflows/approve_renovate_pr.yaml@v30.2.0 + permissions: + pull-requests: write # Needed to approve PR diff --git a/.github/workflows/check_pr.yaml b/.github/workflows/check_pr.yaml new file mode 100644 index 0000000000..6eb3823585 --- /dev/null +++ b/.github/workflows/check_pr.yaml @@ -0,0 +1,18 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. 
+name: Check pull request + +on: + pull_request: + types: + - opened + - labeled + - unlabeled + - edited + branches: + - main + +jobs: + check-pr: + name: Check pull request + uses: canonical/data-platform-workflows/.github/workflows/check_charm_pr.yaml@v30.2.0 diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 3d8bc7679f..b17734d628 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -27,7 +27,7 @@ on: jobs: lint: name: Lint - uses: canonical/data-platform-workflows/.github/workflows/lint.yaml@v29.0.5 + uses: canonical/data-platform-workflows/.github/workflows/lint.yaml@v30.2.0 unit-test: name: Unit test charm @@ -49,46 +49,19 @@ jobs: build: name: Build charm - uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v29.0.5 + uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v30.2.0 + with: + cache: false integration-test: - strategy: - fail-fast: false - matrix: - juju: - - agent: 2.9.51 # renovate: juju-agent-pin-minor - libjuju: ==2.9.49.1 # renovate: latest libjuju 2 - allure_on_amd64: false - - agent: 3.6.2 # renovate: juju-agent-pin-minor - allure_on_amd64: true - architecture: - - amd64 - include: - - juju: - agent: 3.6.2 # renovate: juju-agent-pin-minor - allure_on_amd64: true - architecture: arm64 - name: Integration | ${{ matrix.juju.agent }} | ${{ matrix.architecture }} + name: Integration test charm needs: - lint - unit-test - build - uses: canonical/data-platform-workflows/.github/workflows/integration_test_charm.yaml@v29.0.5 + uses: ./.github/workflows/integration_test.yaml with: artifact-prefix: ${{ needs.build.outputs.artifact-prefix }} - architecture: ${{ matrix.architecture }} - cloud: microk8s - microk8s-snap-channel: 1.32-strict/stable # renovate: latest microk8s - juju-agent-version: ${{ matrix.juju.agent }} - libjuju-version-constraint: ${{ matrix.juju.libjuju }} - _beta_allure_report: ${{ matrix.juju.allure_on_amd64 && matrix.architecture == 'amd64' }} - secrets: - integration-test: | - { - "AWS_ACCESS_KEY": "${{ secrets.AWS_ACCESS_KEY }}", - "AWS_SECRET_KEY": "${{ secrets.AWS_SECRET_KEY }}", - "GCP_ACCESS_KEY": "${{ secrets.GCP_ACCESS_KEY }}", - "GCP_SECRET_KEY": "${{ secrets.GCP_SECRET_KEY }}", - } + secrets: inherit permissions: - contents: write # Needed for Allure Report beta + contents: write # Needed for Allure Report diff --git a/.github/workflows/cla-check.yml b/.github/workflows/cla-check.yml index f0590d5b65..2567517472 100644 --- a/.github/workflows/cla-check.yml +++ b/.github/workflows/cla-check.yml @@ -9,4 +9,4 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Check if Canonical's Contributor License Agreement has been signed - uses: canonical/has-signed-canonical-cla@v1 + uses: canonical/has-signed-canonical-cla@v2 diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml new file mode 100644 index 0000000000..a88dcfbbdd --- /dev/null +++ b/.github/workflows/integration_test.yaml @@ -0,0 +1,313 @@ +on: + workflow_call: + inputs: + artifact-prefix: + description: | + Prefix for charm package GitHub artifact(s) + + Use canonical/data-platform-workflows build_charm.yaml to build the charm(s) + required: true + type: string + +jobs: + collect-integration-tests: + name: Collect integration test spread jobs + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set up environment + run: | + sudo snap install charmcraft --classic + pipx install tox poetry + - name: Collect spread 
jobs + id: collect-jobs + shell: python + run: | + import json + import os + import subprocess + + spread_jobs = ( + subprocess.run( + ["charmcraft", "test", "--list", "github-ci"], capture_output=True, check=True, text=True + ) + .stdout.strip() + .split("\n") + ) + jobs = [] + for job in spread_jobs: + # Example `job`: "github-ci:ubuntu-24.04:tests/spread/test_charm.py:juju36" + _, runner, task, variant = job.split(":") + # Example: "test_charm.py" + task = task.removeprefix("tests/spread/") + if runner.endswith("-arm"): + architecture = "arm64" + else: + architecture = "amd64" + # Example: "test_charm.py:juju36 | amd64" + name = f"{task}:{variant} | {architecture}" + # ":" character not valid in GitHub Actions artifact + name_in_artifact = f"{task}-{variant}-{architecture}" + jobs.append({ + "spread_job": job, + "name": name, + "name_in_artifact": name_in_artifact, + "runner": runner, + }) + output = f"jobs={json.dumps(jobs)}" + print(output) + with open(os.environ["GITHUB_OUTPUT"], "a") as file: + file.write(output) + - name: Generate Allure default test results + if: ${{ github.event_name == 'schedule' && github.run_attempt == '1' }} + run: tox run -e integration -- tests/integration --allure-default-dir=allure-default-results + - name: Upload Allure default results + # Default test results in case the integration tests time out or runner set up fails + # (So that Allure report will show "unknown"/"failed" test result, instead of omitting the test) + if: ${{ github.event_name == 'schedule' && github.run_attempt == '1' }} + uses: actions/upload-artifact@v4 + with: + name: allure-default-results-integration-test + path: allure-default-results/ + if-no-files-found: error + outputs: + jobs: ${{ steps.collect-jobs.outputs.jobs }} + + integration-test: + strategy: + fail-fast: false + matrix: + job: ${{ fromJSON(needs.collect-integration-tests.outputs.jobs) }} + name: ${{ matrix.job.name }} + needs: + - collect-integration-tests + runs-on: ${{ matrix.job.runner }} + timeout-minutes: 217 # Sum of steps `timeout-minutes` + 5 + steps: + - name: Free up disk space + timeout-minutes: 1 + run: | + printf '\nDisk usage before cleanup\n' + df --human-readable + # Based on https://github.com/actions/runner-images/issues/2840#issuecomment-790492173 + rm -r /opt/hostedtoolcache/ + printf '\nDisk usage after cleanup\n' + df --human-readable + - name: Checkout + timeout-minutes: 3 + uses: actions/checkout@v4 + - name: Set up environment + timeout-minutes: 5 + run: sudo snap install charmcraft --classic + # TODO: remove when https://github.com/canonical/charmcraft/issues/2105 and + # https://github.com/canonical/charmcraft/issues/2130 fixed + - run: | + sudo snap install go --classic + go install github.com/snapcore/spread/cmd/spread@latest + - name: Download packed charm(s) + timeout-minutes: 5 + uses: actions/download-artifact@v4 + with: + pattern: ${{ inputs.artifact-prefix }}-* + merge-multiple: true + - name: Run spread job + timeout-minutes: 180 + id: spread + # TODO: replace with `charmcraft test` when + # https://github.com/canonical/charmcraft/issues/2105 and + # https://github.com/canonical/charmcraft/issues/2130 fixed + run: ~/go/bin/spread -vv -artifacts=artifacts '${{ matrix.job.spread_job }}' + env: + AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }} + AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }} + GCP_ACCESS_KEY: ${{ secrets.GCP_ACCESS_KEY }} + GCP_SECRET_KEY: ${{ secrets.GCP_SECRET_KEY }} + - name: Upload Allure results + timeout-minutes: 3 + # Only upload results from one spread system & 
one spread variant + # Allure can only process one result per pytest test ID. If parameterization is done via + # spread instead of pytest, there will be overlapping pytest test IDs. + if: ${{ (success() || (failure() && steps.spread.outcome == 'failure')) && startsWith(matrix.job.spread_job, 'github-ci:ubuntu-24.04:') && endsWith(matrix.job.spread_job, ':juju36') && github.event_name == 'schedule' && github.run_attempt == '1' }} + uses: actions/upload-artifact@v4 + with: + name: allure-results-integration-test-${{ matrix.job.name_in_artifact }} + path: artifacts/${{ matrix.job.spread_job }}/allure-results/ + if-no-files-found: error + - timeout-minutes: 1 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: snap list + - name: Select model + timeout-minutes: 1 + # `!contains(matrix.job.spread_job, 'juju29')` workaround for juju 2 error: + # "ERROR cannot acquire lock file to read controller concierge-microk8s: unable to open + # /tmp/juju-store-lock-3635383939333230: permission denied" + # Unable to workaround error with `sudo rm /tmp/juju-*` + if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }} + id: juju-switch + run: | + # sudo needed since spread runs scripts as root + # "testing" is default model created by concierge + sudo juju switch testing + mkdir ~/logs/ + - name: juju status + timeout-minutes: 1 + if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }} + run: sudo juju status --color --relations | tee ~/logs/juju-status.txt + - name: juju debug-log + timeout-minutes: 3 + if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }} + run: sudo juju debug-log --color --replay --no-tail | tee ~/logs/juju-debug-log.txt + - name: jhack tail + timeout-minutes: 3 + if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }} + run: sudo jhack tail --printer raw --replay --no-watch | tee ~/logs/jhack-tail.txt + - name: Upload logs + timeout-minutes: 5 + if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }} + uses: actions/upload-artifact@v4 + with: + name: logs-integration-test-${{ matrix.job.name_in_artifact }} + path: ~/logs/ + if-no-files-found: error + - name: Disk usage + timeout-minutes: 1 + if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }} + run: df --human-readable + + allure-report: + # TODO future improvement: use concurrency group for job + name: Publish Allure report + if: ${{ !cancelled() && github.event_name == 'schedule' && github.run_attempt == '1' }} + needs: + - integration-test + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - name: Download Allure + # Following instructions from https://allurereport.org/docs/install-for-linux/#install-from-a-deb-package + run: gh release download --repo allure-framework/allure2 --pattern 'allure_*.deb' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Install Allure + run: | + sudo apt-get update + sudo apt-get install ./allure_*.deb -y + # For first run, manually create branch with no history + # (e.g. + # git checkout --orphan gh-pages-beta + # git rm -rf . 
+      # touch .nojekyll
+      # git add .nojekyll
+      # git commit -m "Initial commit"
+      # git push origin gh-pages-beta
+      # )
+      - name: Checkout GitHub pages branch
+        uses: actions/checkout@v4
+        with:
+          ref: gh-pages-beta
+          path: repo/
+      - name: Download default test results
+        # Default test results in case the integration tests time out or runner set up fails
+        # (So that Allure report will show "unknown"/"failed" test result, instead of omitting the test)
+        uses: actions/download-artifact@v4
+        with:
+          path: allure-default-results/
+          name: allure-default-results-integration-test
+      - name: Download test results
+        uses: actions/download-artifact@v4
+        with:
+          path: allure-results/
+          pattern: allure-results-integration-test-*
+          merge-multiple: true
+      - name: Combine Allure default results & actual results
+        # For every test: if actual result available, use that. Otherwise, use default result
+        # So that, if actual result not available, Allure report will show "unknown"/"failed" test result
+        # instead of omitting the test
+        shell: python
+        run: |
+          import dataclasses
+          import json
+          import pathlib
+
+
+          @dataclasses.dataclass(frozen=True)
+          class Result:
+              test_case_id: str
+              path: pathlib.Path
+
+              def __eq__(self, other):
+                  if not isinstance(other, type(self)):
+                      return False
+                  return self.test_case_id == other.test_case_id
+
+              def __hash__(self):
+                  # Must agree with the custom `__eq__` above. The frozen dataclass would
+                  # otherwise generate a hash over all fields (including `path`), which
+                  # always differs between the two directories and would break the set
+                  # difference below.
+                  return hash(self.test_case_id)
+
+
+          actual_results = pathlib.Path("allure-results")
+          default_results = pathlib.Path("allure-default-results")
+
+          results: dict[pathlib.Path, set[Result]] = {
+              actual_results: set(),
+              default_results: set(),
+          }
+          for directory, results_ in results.items():
+              for path in directory.glob("*-result.json"):
+                  with path.open("r") as file:
+                      id_ = json.load(file)["testCaseId"]
+                  results_.add(Result(id_, path))
+
+          actual_results.mkdir(exist_ok=True)
+
+          missing_results = results[default_results] - results[actual_results]
+          for default_result in missing_results:
+              # Move to `actual_results` directory
+              default_result.path.rename(actual_results / default_result.path.name)
+      - name: Load test report history
+        run: |
+          if [[ -d repo/_latest/history/ ]]
+          then
+            echo 'Loading history'
+            cp -r repo/_latest/history/ allure-results/
+          fi
+      - name: Create executor.json
+        shell: python
+        run: |
+          # Reverse engineered from https://github.com/simple-elf/allure-report-action/blob/eca283b643d577c69b8e4f048dd6cd8eb8457cfd/entrypoint.sh
+          import json
+
+          DATA = {
+              "name": "GitHub Actions",
+              "type": "github",
+              "buildOrder": ${{ github.run_number }},  # TODO future improvement: use run ID
+              "buildName": "Run ${{ github.run_id }}",
+              "buildUrl": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+              "reportUrl": "../${{ github.run_number }}/",
+          }
+          with open("allure-results/executor.json", "w") as file:
+              json.dump(DATA, file)
+      - name: Generate Allure report
+        run: allure generate
+      - name: Create index.html
+        shell: python
+        run: |
+          # Redirect the GitHub pages root to the latest report
+          DATA = f"""<!DOCTYPE html>
+          <html>
+          <head><meta http-equiv="refresh" content="0; url=./_latest/"></head>
+          </html>
+          """
+          with open("repo/index.html", "w") as file:
+              file.write(DATA)
+      - name: Update GitHub pages branch
+        working-directory: repo/
+        # TODO future improvement: commit message
+        run: |
+          mkdir '${{ github.run_number }}'
+          rm -f _latest
+          ln -s '${{ github.run_number }}' _latest
+          cp -r ../allure-report/. _latest/
+          git add .
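+          # Commit and publish the new report as the github-actions bot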
+ git config user.name "GitHub Actions" + git config user.email "41898282+github-actions[bot]@users.noreply.github.com" + git commit -m "Allure report ${{ github.run_number }}" + # Uses token set in checkout step + git push origin gh-pages-beta diff --git a/.github/workflows/promote.yaml b/.github/workflows/promote.yaml new file mode 100644 index 0000000000..6b2832b4ec --- /dev/null +++ b/.github/workflows/promote.yaml @@ -0,0 +1,36 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. +name: Promote charm + +on: + workflow_dispatch: + inputs: + from-risk: + description: Promote from this Charmhub risk + required: true + type: choice + options: + - edge + - beta + - candidate + to-risk: + description: Promote to this Charmhub risk + required: true + type: choice + options: + - beta + - candidate + - stable + +jobs: + promote: + name: Promote charm + uses: canonical/data-platform-workflows/.github/workflows/_promote_charm.yaml@v30.2.0 + with: + track: '16' + from-risk: ${{ inputs.from-risk }} + to-risk: ${{ inputs.to-risk }} + secrets: + charmhub-token: ${{ secrets.CHARMHUB_TOKEN }} + permissions: + contents: write # Needed to edit GitHub releases diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 905433c52d..cb9df85de5 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -5,7 +5,7 @@ name: Release to Charmhub on: push: branches: - - main + - '*/edge' paths-ignore: - 'tests/**' - 'docs/**' @@ -21,7 +21,7 @@ jobs: uses: ./.github/workflows/ci.yaml secrets: inherit permissions: - contents: write # Needed for Allure Report beta + contents: write # Needed for Allure Report release-libraries: name: Release libraries @@ -44,9 +44,9 @@ jobs: name: Release charm needs: - ci-tests - uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v29.0.5 + uses: canonical/data-platform-workflows/.github/workflows/release_charm.yaml@v30.2.0 with: - channel: 14/edge + channel: ${{ github.ref_name }} artifact-prefix: ${{ needs.ci-tests.outputs.artifact-prefix }} secrets: charmhub-token: ${{ secrets.CHARMHUB_TOKEN }} diff --git a/.github/workflows/sync_docs.yaml b/.github/workflows/sync_docs.yaml index 8c4ed4ef0c..1fc199fbda 100644 --- a/.github/workflows/sync_docs.yaml +++ b/.github/workflows/sync_docs.yaml @@ -10,7 +10,7 @@ on: jobs: sync-docs: name: Sync docs from Discourse - uses: canonical/data-platform-workflows/.github/workflows/sync_docs.yaml@v29.0.5 + uses: canonical/data-platform-workflows/.github/workflows/sync_docs.yaml@v30.2.0 with: reviewers: a-velasco permissions: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 30d41cfae8..b3b1dfcbd8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -34,16 +34,10 @@ source venv/bin/activate tox run -e format # update your code according to linting rules tox run -e lint # code style tox run -e unit # unit tests -tox run -e integration-* # integration tests +charmcraft test lxd-vm: # integration tests tox # runs 'lint' and 'unit' environments ``` -Before running integration tests, run this command to ensure your config is accessible by lightkube: - -```shell -microk8s config > ~/.kube/config -``` - ## Build charm Build the charm in this git repository using: @@ -65,7 +59,7 @@ juju model-config logging-config="=INFO;unit=DEBUG" microk8s enable rbac # Deploy the charm -juju deploy ./postgresql-k8s_ubuntu-22.04-amd64.charm --trust \ +juju deploy ./postgresql-k8s_ubuntu-24.04-amd64.charm --trust \ --resource postgresql-image=$(yq 
'(.resources.postgresql-image.upstream-source)' metadata.yaml)
 ```
diff --git a/README.md b/README.md
index e5abc71343..3a5583dcf4 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ Bootstrap a Kubernetes (e.g. [Multipass-based MicroK8s](https://discourse.charmh
 
 ```shell
 juju add-model postgresql-k8s
-juju deploy postgresql-k8s --channel 14 --trust
+juju deploy postgresql-k8s --channel 16/edge --trust
 ```
 
 **Note:** the `--trust` flag is required because the charm and Patroni need to create some K8s resources.
@@ -62,7 +62,7 @@ Adding a relation is accomplished with `juju relate` (or `juju integrate` for Ju
 
 ```shell
 # Deploy Charmed PostgreSQL cluster with 3 nodes
-juju deploy postgresql-k8s -n 3 --trust --channel 14
+juju deploy postgresql-k8s -n 3 --trust --channel 16/edge
 
 # Deploy the relevant application charms
 juju deploy mycharm
@@ -87,7 +87,7 @@ juju status --relations
 This charm supports legacy interface `pgsql` from the previous [PostgreSQL charm](https://launchpad.net/postgresql-charm):
 
 ```shell
-juju deploy postgresql-k8s --trust --channel 14
+juju deploy postgresql-k8s --trust --channel 16/edge
 juju deploy finos-waltz-k8s --channel edge
 juju relate postgresql-k8s:db finos-waltz-k8s
 ```
diff --git a/actions.yaml b/actions.yaml
index b3e3c24abb..4f112dda58 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -34,8 +34,13 @@ list-backups:
 pre-upgrade-check:
   description: Run necessary pre-upgrade checks and preparations before executing a charm refresh.
 promote-to-primary:
-  description: Promotes the cluster of choice to a primary cluster. Must be ran against the leader unit.
+  description: Promotes the cluster of choice to a primary cluster. Must be run against the leader unit when promoting a cluster,
+    or against the unit to be promoted within the cluster.
   params:
+    scope:
+      type: string
+      default: cluster
+      description: Whether to promote a unit or a cluster. Must be set to either unit or cluster.
     force:
       type: boolean
       description: Force the promotion of a cluster when there is already a primary cluster.
diff --git a/charmcraft.yaml b/charmcraft.yaml
index b4cddfe1ff..e997e8bc3d 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -3,8 +3,8 @@ type: charm
 
 platforms:
-  ubuntu@22.04:amd64:
-  ubuntu@22.04:arm64:
+  ubuntu@24.04:amd64:
+  ubuntu@24.04:arm64:
 # Files implicitly created by charmcraft without a part:
 # - dispatch (https://github.com/canonical/charmcraft/pull/1898)
 # - manifest.yaml
@@ -24,10 +24,10 @@ parts:
       # Use environment variable instead of `--break-system-packages` to avoid failing on older
       # versions of pip that do not recognize `--break-system-packages`
       # `--user` needed (in addition to `--break-system-packages`) for Ubuntu >=24.04
-      PIP_BREAK_SYSTEM_PACKAGES=true python3 -m pip install --user --upgrade pip==25.0  # renovate: charmcraft-pip-latest
+      PIP_BREAK_SYSTEM_PACKAGES=true python3 -m pip install --user --upgrade pip==25.0.1  # renovate: charmcraft-pip-latest
 
       # Use uv to install poetry so that a newer version of Python can be installed if needed by poetry
-      curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/uv/releases/download/0.5.26/uv-installer.sh | sh  # renovate: charmcraft-uv-latest
+      curl --proto '=https' --tlsv1.2 -LsSf https://github.com/astral-sh/uv/releases/download/0.6.5/uv-installer.sh | sh  # renovate: charmcraft-uv-latest
 
       # poetry 2.0.0 requires Python >=3.9
      if !
"$HOME/.local/bin/uv" python find '>=3.9' then @@ -35,7 +35,7 @@ parts: # (to reduce the number of Python versions we use) "$HOME/.local/bin/uv" python install 3.10.12 # renovate: charmcraft-python-ubuntu-22.04 fi - "$HOME/.local/bin/uv" tool install --no-python-downloads --python '>=3.9' poetry==2.0.1 --with poetry-plugin-export==1.9.0 # renovate: charmcraft-poetry-latest + "$HOME/.local/bin/uv" tool install --no-python-downloads --python '>=3.9' poetry==2.1.1 --with poetry-plugin-export==1.9.0 # renovate: charmcraft-poetry-latest ln -sf "$HOME/.local/bin/poetry" /usr/local/bin/poetry # "charm-poetry" part name is arbitrary; use for consistency @@ -75,7 +75,7 @@ parts: # rpds-py (Python package) >=0.19.0 requires rustc >=1.76, which is not available in the # Ubuntu 22.04 archive. Install rustc and cargo using rustup instead of the Ubuntu archive rustup set profile minimal - rustup default 1.84.1 # renovate: charmcraft-rust-latest + rustup default 1.85.0 # renovate: charmcraft-rust-latest craftctl default # Include requirements.txt in *.charm artifact for easier debugging @@ -86,6 +86,7 @@ parts: source: . stage: - LICENSE + - scripts - templates libpq: build-packages: diff --git a/concierge.yaml b/concierge.yaml new file mode 100644 index 0000000000..0d2bbc8c7a --- /dev/null +++ b/concierge.yaml @@ -0,0 +1,17 @@ +juju: + model-defaults: + logging-config: =INFO; unit=DEBUG +providers: + microk8s: + enable: true + bootstrap: true + addons: + - dns + - hostpath-storage + - rbac +host: + snaps: + jhack: + channel: latest/edge + connections: + - jhack:dot-local-share-juju snapd diff --git a/config.yaml b/config.yaml index c7f715ed15..ba66b91bc6 100644 --- a/config.yaml +++ b/config.yaml @@ -2,6 +2,29 @@ # See LICENSE file for licensing details. options: + synchronous_node_count: + description: | + Sets the number of synchronous nodes to be maintained in the cluster. Should be + either "all", "majority" or a positive integer value. + type: string + default: "all" + connection_authentication_timeout: + description: | + Sets the maximum allowed time to complete client authentication. + Allowed values are: from 1 to 600. + type: int + default: 60 + connection_statement_timeout: + description: | + Sets the maximum allowed duration of any statement. + Allowed values are: from 0 to 2147483647. + type: int + default: 0 + cpu_parallel_leader_participation: + description: | + Controls whether Gather and Gather Merge also run subplans. + type: boolean + default: true durability_synchronous_commit: description: | Sets the current transactions synchronization level. This charm allows only the @@ -9,6 +32,10 @@ options: crashes and there are replicas. type: string default: "on" + experimental_max_connections: + type: int + description: | + [EXPERIMENTAL] Force set max_connections. instance_default_text_search_config: description: | Selects the text search configuration that is used by those variants of the text @@ -31,6 +58,18 @@ options: Allowed values are: “md5” and “scram-sha-256”. type: string default: "scram-sha-256" + instance_synchronize_seqscans: + description: | + Enable synchronized sequential scans. + type: boolean + default: true + logging_client_min_messages: + description: | + Sets the message levels that are sent to the client. + Allowed values are one of 'debug5', 'debug4', 'debug3', 'debug2', 'debug1', 'log', 'notice', 'warning' or 'error'. + Each level includes all the levels that follow it. The later the level, the fewer messages are sent. 
+ type: string + default: "notice" logging_log_connections: description: | Logs each successful connection. @@ -53,6 +92,13 @@ options: statement durations). type: int default: -1 + logging_track_functions: + description: | + Collects function-level statistics on database activity. + Allowed values are one of 'none', 'pl', 'all'. + Enables tracking of function call counts and time used. Specify pl to track only procedural-language functions + type: string + default: "none" memory_maintenance_work_mem: description: | Sets the maximum memory (KB) to be used for maintenance operations. @@ -90,251 +136,513 @@ options: Allowed values are: “on”, “off” and “partition”. type: string default: "partition" + optimizer_cpu_index_tuple_cost: + description: | + Sets the planner's estimate of the cost of processing each index entry during an index scan. + Allowed values are: from 0 to 1.80E+308. + type: float + default: 0.005 + optimizer_cpu_operator_cost: + description: | + Sets the planner's estimate of the cost of processing each operator or function call. + Allowed values are: from 0 to 1.80E+308. + type: float + default: 0.0025 + optimizer_cpu_tuple_cost: + description: | + Sets the planner's estimate of the cost of processing each tuple (row). + Allowed values are: from 0 to 1.80E+308. + type: float + default: 0.01 + optimizer_cursor_tuple_fraction: + description: | + Sets the planner's estimate of the fraction of a cursor's rows that will be retrieved. + Allowed values are: from 0 to 1. + type: float + default: 0.1 optimizer_default_statistics_target: description: | Sets the default statistics target. Allowed values are: from 1 to 10000. type: int default: 100 + optimizer_enable_async_append: + description: | + Enables the planner's use of async append plans. + type: boolean + default: true + optimizer_enable_bitmapscan: + description: | + Enables the planner's use of bitmap-scan plans. + type: boolean + default: true + optimizer_enable_gathermerge: + description: | + Enables the planner's use of gather merge plans. + type: boolean + default: true + optimizer_enable_hashagg: + description: | + Enables the planner's use of hashed aggregation plans. + type: boolean + default: true + optimizer_enable_hashjoin: + description: | + Enables the planner's use of hash join plans. + type: boolean + default: true + optimizer_enable_incremental_sort: + description: | + Enables the planner's use of incremental sort steps. + type: boolean + default: true + optimizer_enable_indexonlyscan: + description: | + Enables the planner's use of index-only-scan plans. + type: boolean + default: true + optimizer_enable_indexscan: + description: | + Enables the planner's use of index-scan plans. + type: boolean + default: true + optimizer_enable_material: + description: | + Enables the planner's use of materialization. + type: boolean + default: true + optimizer_enable_memoize: + description: | + Enables the planner's use of memoization. + type: boolean + default: true + optimizer_enable_mergejoin: + description: | + Enables the planner's use of merge join plans. + type: boolean + default: true + optimizer_enable_nestloop: + description: | + Enables the planner's use of nested-loop join plans. + type: boolean + default: true + optimizer_enable_parallel_append: + description: | + Enables the planner's use of parallel append plans. + type: boolean + default: true + optimizer_enable_parallel_hash: + description: | + Enables the planner's use of parallel hash plans. 
+ type: boolean + default: true + optimizer_enable_partition_pruning: + description: | + Enables plan-time and execution-time partition pruning. + type: boolean + default: true + optimizer_enable_partitionwise_aggregate: + description: | + Enables partitionwise aggregation and grouping. + type: boolean + default: false + optimizer_enable_partitionwise_join: + description: | + Enables partitionwise join. + type: boolean + default: false + optimizer_enable_seqscan: + description: | + Enables the planner's use of sequential-scan plans. + type: boolean + default: true + optimizer_enable_sort: + description: | + Enables the planner's use of explicit sort steps. + type: boolean + default: true + optimizer_enable_tidscan: + description: | + Enables the planner's use of TID scan plans. + type: boolean + default: true optimizer_from_collapse_limit: description: | Sets the FROM-list size beyond which subqueries are not collapsed. Allowed values are: from 1 to 2147483647. type: int default: 8 + optimizer_geqo: + description: | + Enables genetic query optimization. + type: boolean + default: true + optimizer_geqo_effort: + description: | + GEQO: effort is used to set the default for other GEQO parameters. + Allowed values are: from 1 to 10. + type: int + default: 5 + optimizer_geqo_generations: + description: | + GEQO: number of iterations of the algorithm. + Allowed values are: from 0 to 2147483647. + type: int + default: 0 + optimizer_geqo_pool_size: + description: | + GEQO: number of individuals in the population. + Allowed values are: from 0 to 2147483647. + type: int + default: 0 + optimizer_geqo_seed: + description: | + GEQO: seed for random path selection. + Allowed values are: from 0 to 1. + type: float + default: 0.0 + optimizer_geqo_selection_bias: + description: | + GEQO: selective pressure within the population. + Allowed values are: from 1.5 to 2. + type: float + default: 2.0 + optimizer_geqo_threshold: + description: | + Sets the threshold of FROM items beyond which GEQO is used. + Allowed values are: from 2 to 2147483647. + type: int + default: 12 + optimizer_jit: + description: | + Allow JIT compilation. + type: boolean + default: true + optimizer_jit_above_cost: + description: | + Perform JIT compilation if query is more expensive. + Allowed values are: from -1 to 1.80E+308. + type: float + default: 100000.0 + optimizer_jit_inline_above_cost: + description: | + Perform JIT inlining if query is more expensive. + Allowed values are: from -1 to 1.80E+308. + type: float + default: 500000.0 + optimizer_jit_optimize_above_cost: + description: | + Optimize JIT-compiled functions if query is more expensive. + Allowed values are: from -1 to 1.80E+308. + type: float + default: 500000.0 optimizer_join_collapse_limit: description: | Sets the FROM-list size beyond which JOIN constructs are not flattened. Allowed values are: from 1 to 2147483647. type: int default: 8 - plugin_citext_enable: + optimizer_min_parallel_index_scan_size: + description: | + Sets the minimum amount of index data for a parallel scan. + Allowed values are: from 0 to 715827882. + type: int + default: 64 + optimizer_min_parallel_table_scan_size: + description: | + Sets the minimum amount of table data for a parallel scan. + Allowed values are: from 0 to 715827882. + type: int + default: 1024 + optimizer_parallel_setup_cost: + description: | + Sets the planner's estimate of the cost of starting up worker processes for parallel query. + Allowed values are: from 0 to 1.80E+308. 
+ type: float + default: 1000.0 + optimizer_parallel_tuple_cost: + description: | + Sets the planner's estimate of the cost of passing each tuple (row) from worker to leader backend. + Allowed values are: from 0 to 1.80E+308. + type: float + default: 0.1 + plugin_address_standardizer_data_us_enable: default: false type: boolean - description: Enable citext extension. - plugin_debversion_enable: + description: Enable address_standardizer_data_us extension + plugin_address_standardizer_enable: default: false type: boolean - description: Enable debversion extension. - plugin_hstore_enable: - default: false + description: Enable address_standardizer extension + plugin_audit_enable: + default: true type: boolean - description: Enable hstore extension. - plugin_pg_trgm_enable: + description: Enable pgAudit extension + plugin_bloom_enable: default: false type: boolean - description: Enable pg_trgm extension. - plugin_plpython3u_enable: + description: Enable bloom extension + plugin_bool_plperl_enable: default: false type: boolean - description: Enable PL/Python extension. - plugin_unaccent_enable: + description: Enable bool_plperl extension + plugin_btree_gin_enable: default: false type: boolean - description: Enable unaccent extension. - plugin_bloom_enable: + description: Enable btree_gin extension + plugin_btree_gist_enable: default: false type: boolean - description: Enable bloom extension. - plugin_btree_gin_enable: + description: Enable btree_gist extension + plugin_citext_enable: default: false type: boolean - description: Enable btree_gin extension. - plugin_btree_gist_enable: + description: Enable citext extension + plugin_cube_enable: default: false type: boolean - description: Enable btree_gist extension. - plugin_cube_enable: + description: Enable cube extension + plugin_debversion_enable: default: false type: boolean - description: Enable cube extension. + description: Enable debversion extension plugin_dict_int_enable: default: false type: boolean - description: Enable dict_int extension. + description: Enable dict_int extension plugin_dict_xsyn_enable: default: false type: boolean - description: Enable dict_xsyn extension. + description: Enable dict_xsyn extension plugin_earthdistance_enable: default: false type: boolean - description: Enable earthdistance extension. + description: Enable earthdistance extension plugin_fuzzystrmatch_enable: default: false type: boolean - description: Enable fuzzystrmatch extension. - plugin_intarray_enable: + description: Enable fuzzystrmatch extension + plugin_hll_enable: default: false type: boolean - description: Enable intarray extension. - plugin_isn_enable: + description: Enable hll extension + plugin_hstore_enable: default: false type: boolean - description: Enable isn extension. - plugin_lo_enable: + description: Enable hstore extension + plugin_hypopg_enable: default: false type: boolean - description: Enable lo extension. - plugin_ltree_enable: + description: Enable hypopg extension + plugin_icu_ext_enable: default: false type: boolean - description: Enable ltree extension. - plugin_old_snapshot_enable: + description: Enable icu_ext extension + plugin_intarray_enable: default: false type: boolean - description: Enable old_snapshot extension. - plugin_pg_freespacemap_enable: + description: Enable intarray extension + plugin_ip4r_enable: default: false type: boolean - description: Enable pg_freespacemap extension. 
- plugin_pgrowlocks_enable: + description: Enable ip4r extension + plugin_isn_enable: default: false type: boolean - description: Enable pgrowlocks extension. - plugin_pgstattuple_enable: + description: Enable isn extension + plugin_jsonb_plperl_enable: default: false type: boolean - description: Enable pgstattuple extension. - plugin_pg_visibility_enable: + description: Enable jsonb_plperl extension + plugin_lo_enable: default: false type: boolean - description: Enable pg_visibility extension. - plugin_seg_enable: + description: Enable lo extension + plugin_ltree_enable: default: false type: boolean - description: Enable seg extension. - plugin_tablefunc_enable: + description: Enable ltree extension + plugin_old_snapshot_enable: default: false type: boolean - description: Enable tablefunc extension. - plugin_tcn_enable: + description: Enable old_snapshot extension + plugin_orafce_enable: default: false type: boolean - description: Enable tcn extension. - plugin_tsm_system_rows_enable: + description: Enable orafce extension + plugin_pg_freespacemap_enable: default: false type: boolean - description: Enable tsm_system_rows extension. - plugin_tsm_system_time_enable: + description: Enable pg_freespacemap extension + plugin_pg_similarity_enable: default: false type: boolean - description: Enable tsm_system_time extension. - plugin_uuid_ossp_enable: + description: Enable pg_similarity extension + plugin_pg_trgm_enable: default: false type: boolean - description: Enable uuid_ossp extension. - plugin_spi_enable: + description: Enable pg_trgm extension + plugin_pg_visibility_enable: default: false type: boolean - description: Enable spi extension. - plugin_bool_plperl_enable: + description: Enable pg_visibility extension + plugin_pgrowlocks_enable: default: false type: boolean - description: Enable bool_plperl extension. - plugin_hll_enable: + description: Enable pgrowlocks extension + plugin_pgstattuple_enable: default: false type: boolean - description: Enable hll extension. - plugin_hypopg_enable: + description: Enable pgstattuple extension + plugin_plperl_enable: default: false type: boolean - description: Enable hypopg extension. - plugin_ip4r_enable: + description: Enable plperl extension + plugin_plpython3u_enable: default: false type: boolean - description: Enable ip4r extension. - plugin_plperl_enable: + description: Enable PL/Python extension + plugin_pltcl_enable: default: false type: boolean - description: Enable plperl extension. - plugin_jsonb_plperl_enable: + description: Enable pltcl extension + plugin_postgis_enable: default: false type: boolean - description: Enable jsonb_plperl extension. - plugin_orafce_enable: + description: Enable postgis extension + plugin_postgis_raster_enable: default: false type: boolean - description: Enable orafce extension. - plugin_pg_similarity_enable: + description: Enable postgis_raster extension + plugin_postgis_tiger_geocoder_enable: + default: false + type: boolean + description: Enable postgis_tiger_geocoder extension + plugin_postgis_topology_enable: default: false type: boolean - description: Enable pg_similarity extension. + description: Enable postgis_topology extension plugin_prefix_enable: default: false type: boolean - description: Enable prefix extension. + description: Enable prefix extension plugin_rdkit_enable: default: false type: boolean - description: Enable rdkit extension. - plugin_tds_fdw_enable: + description: Enable rdkit extension + plugin_seg_enable: default: false type: boolean - description: Enable tds_fdw extension. 
-  plugin_icu_ext_enable:
+    description: Enable seg extension
+  plugin_spi_enable:
     default: false
     type: boolean
-    description: Enable icu_ext extension.
-  plugin_pltcl_enable:
+    description: Enable spi extension
+  plugin_tablefunc_enable:
     default: false
     type: boolean
-    description: Enable pltcl extension.
-  plugin_postgis_enable:
+    description: Enable tablefunc extension
+  plugin_tcn_enable:
     default: false
     type: boolean
-    description: Enable postgis extension.
-  plugin_address_standardizer_enable:
+    description: Enable tcn extension
+  plugin_tds_fdw_enable:
     default: false
     type: boolean
-    description: Enable address_standardizer extension.
-  plugin_postgis_raster_enable:
+    description: Enable tds_fdw extension
+  plugin_timescaledb_enable:
     default: false
     type: boolean
-    description: Enable postgis_raster extension.
-  plugin_address_standardizer_data_us_enable:
+    description: Enable timescaledb extension
+  plugin_tsm_system_rows_enable:
     default: false
     type: boolean
-    description: Enable address_standardizer_data_us extension.
-  plugin_postgis_tiger_geocoder_enable:
+    description: Enable tsm_system_rows extension
+  plugin_tsm_system_time_enable:
     default: false
     type: boolean
-    description: Enable postgis_tiger_geocoder extension.
-  plugin_postgis_topology_enable:
+    description: Enable tsm_system_time extension
+  plugin_unaccent_enable:
     default: false
     type: boolean
-    description: Enable postgis_topology extension.
-  plugin_vector_enable:
+    description: Enable unaccent extension
+  plugin_uuid_ossp_enable:
     default: false
     type: boolean
-    description: Enable pgvector extension
-  plugin_timescaledb_enable:
+    description: Enable uuid_ossp extension
+  plugin_vector_enable:
     default: false
     type: boolean
-    description: Enable timescaledb extension
-  plugin_audit_enable:
-    default: true
-    type: boolean
-    description: Enable pgAudit extension
+    description: Enable pgvector extension
   profile:
-      description: |
+    description: |
       Profile representing the scope of deployment, and used to tune resource allocation.
       Allowed values are: “production” and “testing”. Production will tune postgresql for
       maximum performance while testing will tune for minimal running performance.
-      type: string
-      default: "production"
+    type: string
+    default: "production"
   profile_limit_memory:
     type: int
     description: |
       Amount of memory in Megabytes to limit PostgreSQL and associated process to.
       If unset, this will be decided according to the default memory limit in the selected profile.
       Only comes into effect when the `production` profile is selected.
+  request_array_nulls:
+    description: |
+      Enable input of NULL elements in arrays.
+    type: boolean
+    default: true
+  request_backslash_quote:
+    description: |
+      Sets whether "\'" is allowed in string literals.
+      Allowed values are "safe_encoding", "on" and "off".
+      safe_encoding allows it only if the client encoding does not allow ASCII \ within a multibyte character.
+    type: string
+    default: "safe_encoding"
   request_date_style:
     description: |
       Sets the display format for date and time values. Allowed formats are explained
       in https://www.postgresql.org/docs/14/runtime-config-client.html#GUC-DATESTYLE.
     type: string
     default: "ISO, MDY"
+  request_deadlock_timeout:
+    description: |
+      Sets the time to wait on a lock before checking for deadlock.
+      Allowed values are: from 1 to 2147483647.
+    type: int
+    default: 1000
+  request_default_transaction_deferrable:
+    description: |
+      Sets the default deferrable status of new transactions.
+    type: boolean
+    default: false
+  request_default_transaction_isolation:
+    description: |
+      Sets the transaction isolation level of each new transaction.
+      Allowed values are one of 'serializable', 'repeatable read', 'read committed', 'read uncommitted'.
+      Under read committed or read uncommitted, a statement can only see rows committed before it began.
+      Under repeatable read, all statements of the current transaction can only see rows committed before
+      the first query or data-modification statement was executed in this transaction.
+      Serializable behaves like repeatable read, and additionally guarantees that concurrent serializable
+      transactions behave as if they had run one at a time.
+    type: string
+    default: "read committed"
+  request_default_transaction_read_only:
+    description: |
+      Sets the default read-only status of new transactions.
+    type: boolean
+    default: false
+  request_escape_string_warning:
+    description: |
+      Warn about backslash escapes in ordinary string literals.
+    type: boolean
+    default: true
+  request_lock_timeout:
+    description: |
+      Sets the maximum allowed duration of any wait for a lock.
+      Allowed values are: from 0 to 2147483647.
+    type: int
+    default: 0
   request_standard_conforming_strings:
     description: |
       Causes ... strings to treat backslashes literally.
@@ -347,12 +655,52 @@ options:
       like PST and POSIX-style time zone specifications.
     type: string
     default: "UTC"
+  request_track_activity_query_size:
+    description: |
+      Sets the size reserved for pg_stat_activity.query, in bytes.
+      Allowed values are: from 100 to 1048576.
+    type: int
+    default: 1024
+  request_transform_null_equals:
+    description: |
+      Treats "expr=NULL" as "expr IS NULL".
+    type: boolean
+    default: false
+  request_xmlbinary:
+    description: |
+      Sets how binary values are to be encoded in XML.
+      Allowed values are one of 'base64', 'hex'.
+    type: string
+    default: "base64"
+  request_xmloption:
+    description: |
+      Sets whether XML data in implicit parsing and serialization operations is to be considered as documents or content fragments.
+      Allowed values are one of 'content', 'document'.
+    type: string
+    default: "content"
   response_bytea_output:
     description: |
       Sets the output format for bytes.
       Allowed values are: “escape” and “hex”.
     type: string
     default: "hex"
+  response_exit_on_error:
+    description: |
+      Terminate session on any error.
+    type: boolean
+    default: false
+  response_extra_float_digits:
+    description: |
+      Sets the number of digits displayed for floating-point values.
+      Allowed values are: from -15 to 3.
+    type: int
+    default: 1
+  response_gin_fuzzy_search_limit:
+    description: |
+      Sets the maximum allowed result for exact search by GIN.
+      Allowed values are: from 0 to 2147483647.
+    type: int
+    default: 0
   response_lc_monetary:
     description: |
       Sets the locale for formatting monetary amounts.
@@ -371,6 +719,42 @@ options:
       Allowed values are the locales available in the unit.
     type: string
     default: "C"
+  session_idle_in_transaction_session_timeout:
+    description: |
+      Sets the maximum allowed idle time between queries, when in a transaction.
+      Allowed values are: from 0 to 2147483647.
+    type: int
+    default: 0
+  storage_bgwriter_lru_maxpages:
+    description: |
+      Background writer maximum number of LRU pages to flush per round.
+      Allowed values are: from 0 to 1073741823.
+    type: int
+    default: 100
+  storage_bgwriter_lru_multiplier:
+    description: |
+      Multiple of the average buffer usage to free per round.
+      Allowed values are: from 0 to 10.
+ type: float + default: 2.0 + storage_default_table_access_method: + description: | + Sets the default table access method for new tables. + These entries can be created using the CREATE ACCESS METHOD SQL command. + type: string + default: "heap" + storage_gin_pending_list_limit: + description: | + Sets the maximum size of the pending list for GIN index. + Allowed values are: from 64 to 2147483647. + type: int + default: 4096 + storage_old_snapshot_threshold: + description: | + Time before a snapshot is too old to read pages changed after the snapshot was taken. + Allowed values are: from -1 to 86400. + type: int + default: -1 vacuum_autovacuum_analyze_scale_factor: description: | Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when @@ -390,6 +774,12 @@ options: transaction ID wraparound. Allowed values are: from 100000 to 2000000000. type: int default: 200000000 + vacuum_autovacuum_naptime: + description: | + Time to sleep between autovacuum runs. + Allowed values are: from 1 to 2147483. + type: int + default: 60 vacuum_autovacuum_vacuum_cost_delay: description: | Sets cost delay value (milliseconds) that will be used in automatic VACUUM operations. @@ -397,6 +787,24 @@ options: vacuum_cost_delay value). type: float default: 2.0 + vacuum_autovacuum_vacuum_cost_limit: + description: | + Vacuum cost amount available before napping, for autovacuum. + Allowed values are: from -1 to 10000. + type: int + default: -1 + vacuum_autovacuum_vacuum_insert_scale_factor: + description: | + Number of tuple inserts prior to vacuum as a fraction of reltuples. + Allowed values are: from 0 to 100. + type: float + default: 0.2 + vacuum_autovacuum_vacuum_insert_threshold: + description: | + Minimum number of tuple inserts prior to vacuum, or -1 to disable insert vacuums. + Allowed values are: from -1 to 2147483647. + type: int + default: 1000 vacuum_autovacuum_vacuum_scale_factor: description: | Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when @@ -404,13 +812,75 @@ options: Allowed values are: from 0 to 100. type: float default: 0.2 + vacuum_autovacuum_vacuum_threshold: + description: | + Minimum number of tuple updates or deletes prior to vacuum. + Allowed values are: from 0 to 2147483647. + type: int + default: 50 + vacuum_vacuum_cost_delay: + description: | + Vacuum cost delay in milliseconds. + Allowed values are: from 0 to 100. + type: float + default: 0.0 + vacuum_vacuum_cost_limit: + description: | + Vacuum cost amount available before napping. + Allowed values are: from 1 to 10000. + type: int + default: 200 + vacuum_vacuum_cost_page_dirty: + description: | + Vacuum cost for a page dirtied by vacuum. + Allowed values are: from 0 to 10000. + type: int + default: 20 + vacuum_vacuum_cost_page_hit: + description: | + Vacuum cost for a page found in the buffer cache. + Allowed values are: from 0 to 10000. + type: int + default: 1 + vacuum_vacuum_cost_page_miss: + description: | + Vacuum cost for a page not found in the buffer cache. + Allowed values are: from 0 to 10000. + type: int + default: 2 + vacuum_vacuum_failsafe_age: + description: | + Age at which VACUUM should trigger failsafe to avoid a wraparound outage. + Allowed values are: from 0 to 2100000000. + type: int + default: 1600000000 + vacuum_vacuum_freeze_min_age: + description: | + Minimum age at which VACUUM should freeze a table row. + Allowed values are: from 0 to 1000000000. 
+ type: int + default: 50000000 vacuum_vacuum_freeze_table_age: description: | Age (in transactions) at which VACUUM should scan whole table to freeze tuples. Allowed values are: from 0 to 2000000000. type: int default: 150000000 - experimental_max_connections: + vacuum_vacuum_multixact_failsafe_age: + description: | + Multixact age at which VACUUM should trigger failsafe to avoid a wraparound outage. + Allowed values are: from 0 to 2100000000. type: int + default: 1600000000 + vacuum_vacuum_multixact_freeze_min_age: description: | - [EXPERIMENTAL] Force set max_connections. + Minimum age at which VACUUM should freeze a MultiXactId in a table row. + Allowed values are: from 0 to 1000000000. + type: int + default: 5000000 + vacuum_vacuum_multixact_freeze_table_age: + description: | + Multixact age at which VACUUM should scan whole table to freeze tuples. + Allowed values are: from 0 to 2000000000. + type: int + default: 150000000 \ No newline at end of file diff --git a/docs/explanation.md b/docs/explanation.md new file mode 100644 index 0000000000..71ec62ebfc --- /dev/null +++ b/docs/explanation.md @@ -0,0 +1,31 @@ +# Explanation + +This section contains pages with more detailed explanations that provide additional context about some of the key concepts behind the PostgreSQL charm: + +* [Architecture] +* [Interfaces and endpoints] +* [Connection pooling] +* [Statuses] +* [Users] +* [Logs] +* [Juju] +* [Legacy charm] + +Charm event flowcharts: +* [Charm] +* [Relations] +* [Backups] + + + +[Architecture]: /t/11856 +[Interfaces and endpoints]: /t/10252 +[Statuses]: /t/11855 +[Users]: /t/10843 +[Logs]: /t/12098 +[Juju]: /t/11986 +[Legacy charm]: /t/11013 +[Connection pooling]: /t/15799 +[Charm]: /t/9305 +[Relations]: /t/9306 +[Backups]: /t/10248 \ No newline at end of file diff --git a/docs/explanation/e-juju-details.md b/docs/explanation/e-juju-details.md index 63dfb7e660..9088e6f93f 100644 --- a/docs/explanation/e-juju-details.md +++ b/docs/explanation/e-juju-details.md @@ -1,32 +1,32 @@ -# Juju tech details - +# Juju [Juju](https://juju.is/) is an open source orchestration engine for software operators that enables the deployment, integration and lifecycle management of applications at any scale, on any infrastructure using charms. -This [charm](https://charmhub.io/postgresql-k8s) is an operator - business logic encapsulated in reusable software packages that automate every aspect of an application's life. Charms are shared via [CharmHub](https://charmhub.io/). +> See also: [Juju client documentation](https://juju.is/docs/juju), [Juju blog](https://ubuntu.com/blog/tag/juju) -See also: +## Compatibility with PostgreSQL +Current stable releases of this charm can still be deployed on Juju 2.9. However, newer features are not supported. +> See the [Releases page](/t/11872) for more information about the minimum Juju version required to operate the features of each revision. -* [Juju Documentation](https://juju.is/docs/juju) and [Blog](https://ubuntu.com/blog/tag/juju) -* [Charm SDK](https://juju.is/docs/sdk) +Additionally, there are limitations regarding integrations with other charms. For example, integration with [modern TLS charms](https://charmhub.io/topics/security-with-x-509-certificates) requires Juju 3.x. ## Breaking changes between Juju 2.9.x and 3.x -As this charm documentation is written for Juju 3.x, users of 2.9.x will encounter noteworthy changes when following the instructions. This section explains those changes. 
+As this charm's documentation is written for Juju 3.x, users of 2.9.x will encounter noteworthy changes when following the instructions. This section explains those changes. Breaking changes have been introduced in the Juju client between versions 2.9.x and 3.x. These are caused by the renaming and re-purposing of several commands - functionality and command options remain unchanged. -In the context of this guide, the pertinent changes are shown here: +In the context of this guide, the pertinent changes are as follows: -|2.9.x|3.x| +| v2.9.x | v3.x | | --- | --- | -|**add-relation**|**integrate**| -|**relate**|**integrate**| -|**run**|**exec**| -|**run-action --wait**|**run**| +|`add-relation`|`integrate`| +|`relate`|`integrate`| +|`run`|`exec`| +|`run-action --wait`|`run`| See the [Juju 3.0 release notes](https://juju.is/docs/juju/roadmap#heading--juju-3-0-0---22-oct-2022) for the comprehensive list of changes. -The response is to therefore substitute the documented command with the equivalent 2.9.x command. For example: +Example substitutions: ### Juju 3.x: ```shell diff --git a/docs/how-to.md b/docs/how-to.md new file mode 100644 index 0000000000..1b42509a59 --- /dev/null +++ b/docs/how-to.md @@ -0,0 +1,99 @@ +# How-to guides + +The following guides cover key processes and common tasks for managing and using Charmed PostgreSQL on Kubernetes. + +## Deployment and setup + +The following guides walk you through the details of how to install different cloud services and bootstrap them to Juju: +* [Canonical K8s] +* [MicroK8s] +* [GKE] +* [EKS] +* [AKS] +* [Multi-availability zones (AZ)][Multi-AZ] + +The following guides cover some specific deployment scenarios and architectures: +* [Terraform] +* [Air-gapped] + +## Usage and maintenance + +* [Integrate with another application] +* [External access] +* [Scale replicas] +* [Enable TLS] +* [Enable plugins/extensions] + +## Backup and restore +* [Configure S3 AWS] +* [Configure S3 RadosGW] +* [Create a backup] +* [Restore a backup] +* [Manage backup retention] +* [Migrate a cluster] + +## Monitoring (COS) + +* [Enable monitoring] +* [Enable alert rules] +* [Enable tracing] + +## Minor upgrades +* [Perform a minor upgrade] +* [Perform a minor rollback] + +## Cross-regional (cluster-cluster) async replication + +* [Cross-regional async replication] + * [Set up clusters] + * [Integrate with a client app] + * [Remove or recover a cluster] + * [Enable plugins/extensions] + +## Development + +This section is aimed at charm developers looking to support PostgreSQL integrations with their charm. 
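+
+As a first step, a client charm author typically vendors the data interfaces charm library before wiring up the integration (a quick sketch of one common starting point; the guides below walk through the full workflow):
+
+```shell
+# Pull the data_interfaces library into the charm under lib/charms/
+charmcraft fetch-lib charms.data_platform_libs.v0.data_interfaces
+```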
+ +* [Integrate with your charm] +* [Migrate data via pg_dump] +* [Migrate data via backup/restore] + + + +[Canonical K8s]: /t/15937 +[MicroK8s]: /t/11858 +[GKE]: /t/11237 +[EKS]: /t/12106 +[AKS]: /t/14307 +[Multi-AZ]: /t/15678 +[Terraform]: /t/14924 +[Air-gapped]: /t/15691 + +[Integrate with another application]: /t/9594 +[External access]: /t/15701 +[Scale replicas]: /t/9592 +[Enable TLS]: /t/9593 +[Enable plugins/extensions]: /t/10907 + +[Configure S3 AWS]: /t/9595 +[Configure S3 RadosGW]: /t/10316 +[Create a backup]: /t/9596 +[Restore a backup]: /t/9597 +[Manage backup retention]: /t/14203 +[Migrate a cluster]: /t/9598 + +[Enable monitoring]: /t/10812 +[Enable alert rules]: /t/12982 +[Enable tracing]: /t/14786 + +[Perform a minor upgrade]: /t/12095 +[Perform a minor rollback]: /t/12096 + +[Cross-regional async replication]: /t/15413 +[Set up clusters]: /t/13895 +[Integrate with a client app]: /t/13896 +[Remove or recover a cluster]: /t/13897 + +[Integrate with your charm]: /t/11853 +[Migrate data via pg_dump]: /t/12162 +[Migrate data via backup/restore]: /t/12161 \ No newline at end of file diff --git a/docs/how-to/h-deploy-microk8s.md b/docs/how-to/h-deploy-microk8s.md deleted file mode 100644 index 77ff967c61..0000000000 --- a/docs/how-to/h-deploy-microk8s.md +++ /dev/null @@ -1,48 +0,0 @@ -# How to deploy on MicroK8s - -This guide assumes you have a running Juju and [MicroK8s](https://microk8s.io/docs) environment. - -For a detailed walkthrough of setting up an environment and deploying the charm on MicroK8s, refer to the following Tutorial pages: -* [1. Set up the environment](/t/9297) - set up Multipass and Juju -* [2. Deploy PostgreSQL](/t/9298) - deploy PostgresQL K8s in a Multipass instance - -MicroK8s can be installed on a multitude of platforms and environments for different use cases. See all options and details in the [official documentation](https://microk8s.io/docs/install-alternatives). - -[note type="caution"] -Not all platforms supported by MicroK8s will work with this charm - keep in mind the [system requirements](/t/11744) of Charmed PostgreSQL. -[/note] - -## Prerequisites -* Canonical MicroK8s 1.27+ -* Fulfill the general [system requirements](/t/11744) - ---- - -[Bootstrap](https://juju.is/docs/juju/juju-bootstrap) a juju controller and create a [model](https://juju.is/docs/juju/juju-add-model) if you haven't already: -```shell -juju bootstrap localhost -juju add-model -``` - -Deploy PostgreSQL K8s: - -```shell -juju deploy postgresql-k8s --trust -``` -> :warning: The `--trust` flag is necessary to create some K8s resources - -> See the [`juju deploy` documentation](https://juju.is/docs/juju/juju-deploy) for all available options at deploy time. -> -> See the [Configurations tab](https://charmhub.io/postgresql-k8s/configurations) for specific PostgreSQL K8s parameters. 
- -Example `juju status --wait 1s` output: -```shell -Model Controller Cloud/Region Version SLA Timestamp -example charm-dev microk8s/localhost 2.9.42 unsupported 12:00:43+01:00 - -App Version Status Scale Charm Channel Rev Address Exposed Message -postgresql-k8s active 1 postgresql-k8s 14/stable 56 10.152.183.167 no - -Unit Workload Agent Address Ports Message -postgresql-k8s/0* active idle 10.1.188.206 -``` \ No newline at end of file diff --git a/docs/how-to/h-deploy.md b/docs/how-to/h-deploy.md new file mode 100644 index 0000000000..e6c73d48f2 --- /dev/null +++ b/docs/how-to/h-deploy.md @@ -0,0 +1,75 @@ +# How to deploy + +This page introduces the PostgreSQL deployment process and lists all the related guides. It contains the following sections: +* [General deployment instructions](#general-deployment-instructions) +* [Clouds](#clouds) +* [Special deployments](#special-deployments) + +--- + +## General deployment instructions + +The basic requirements for deploying a charm are the [**Juju client**](https://juju.is/docs/juju) and a Kubernetes [**cloud**](https://juju.is/docs/juju/cloud). + +First, [bootstrap](https://juju.is/docs/juju/juju-bootstrap) the cloud controller and create a [model](https://canonical-juju.readthedocs-hosted.com/en/latest/user/reference/model/): +```shell +juju bootstrap <cloud> <controller> +juju add-model <model> +``` + +Then, either continue with the `juju` client **or** use the `terraform juju` client to deploy the PostgreSQL charm. + +To deploy with the `juju` client: +```shell +juju deploy postgresql-k8s --trust +``` +> See also: [`juju deploy` command](https://canonical-juju.readthedocs-hosted.com/en/latest/user/reference/juju-cli/list-of-juju-cli-commands/deploy/) + +To deploy with `terraform juju`, follow the guide [How to deploy using Terraform](/t/14924). A minimal sketch of this flow is shown at the end of this page. +> See also: [Terraform Provider for Juju documentation](https://canonical-terraform-provider-juju.readthedocs-hosted.com/en/latest/) + +If you are not sure where to start or would like a more guided walkthrough for setting up your environment, see the [Charmed PostgreSQL K8s tutorial][Tutorial]. + +## Clouds + +The guides below go through the steps to install different cloud services and bootstrap them to Juju: +* [Canonical K8s] +* [Google Kubernetes Engine] +* [Amazon Elastic Kubernetes Service] +* [Azure Kubernetes Service] + +[How to deploy on multiple availability zones (AZ)] demonstrates how to deploy a cluster on a cloud using different AZs for high availability. + +## Special deployments + +These guides cover some specific deployment scenarios and architectures. + +### External network access + +See [How to connect from outside the local network] for guidance on connecting with a client application outside PostgreSQL's Kubernetes cluster. + +### Air-gapped +[How to deploy in an offline or air-gapped environment] goes over the special configuration steps for installing PostgreSQL in an air-gapped environment via Charmhub and the Snap Store Proxy. + +### Cluster-cluster replication +Cluster-cluster, cross-regional, or multi-server asynchronous replication focuses on disaster recovery by distributing data across different servers. + +The [Cross-regional async replication] guide goes through the steps to set up clusters for cluster-cluster replication, integrate with a client, and remove or recover a failed cluster.
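+As a rough, non-authoritative sketch of the `terraform juju` flow (assuming a working directory that already contains a Terraform module for this charm, as described in the Terraform guide above):
+```shell
+# Initialise the working directory and fetch the Terraform Provider for Juju
+terraform init
+
+# Preview the changes Terraform would make to the Juju model
+terraform plan
+
+# Apply the plan, deploying the resources described by the module
+terraform apply
+```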
+ +[Tutorial]: /t/9296 + +[How to deploy using Terraform]: /t/14924 + +[Canonical K8s]: /t/15937 +[Google Kubernetes Engine]: /t/11237 +[Amazon Elastic Kubernetes Service]: /t/12106 +[Azure Kubernetes Service]: /t/14307 + +[How to deploy on multiple availability zones (AZ)]: /t/15678 + + +[How to enable TLS]: /t/9593 +[How to connect from outside the local network]: /t/15701 + +[How to deploy in an offline or air-gapped environment]: /t/15691 +[Cross-regional async replication]: /t/15413 \ No newline at end of file diff --git a/docs/how-to/h-enable-tracing.md b/docs/how-to/h-enable-tracing.md index 9878704dee..229ab64673 100644 --- a/docs/how-to/h-enable-tracing.md +++ b/docs/how-to/h-enable-tracing.md @@ -85,7 +85,7 @@ juju deploy grafana-agent-k8s --channel latest-edge Then, integrate Grafana Agent K8s with the consumed interface from the previous section: ```shell -juju integrate grafana-agent-k8s: tracing tempo:tracing +juju integrate grafana-agent-k8s:tracing tempo:tracing ``` Finally, integrate Charmed PostgreSQL K8s with Grafana Agent K8s: diff --git a/docs/how-to/h-restore-backup.md b/docs/how-to/h-restore-backup.md index 66916b645c..ad81e0db1a 100644 --- a/docs/how-to/h-restore-backup.md +++ b/docs/how-to/h-restore-backup.md @@ -15,9 +15,9 @@ To restore a backup that was made from a *different* cluster, (i.e. cluster migr - Access to S3 storage - [Configured settings for S3 storage](/t/charmed-postgresql-k8s-how-to-configure-s3/9595?channel=14/stable) - [Existing backups in your S3-storage](/t/charmed-postgresql-k8s-how-to-create-and-list-backups/9596?channel=14/stable) -- [Point-in-time recovery](#point-in-time-recovery) requires the following PostgreSQL charm revisions: - - 435+ for `arm64` - - 436+ for `amd64` +- [Point-in-time recovery](#point-in-time-recovery) requires the following PostgreSQL K8s charm revisions: + - 382+ for `arm64` + - 381+ for `amd64` ## Summary * [List backups](#list-backups) diff --git a/docs/how-to/h-upgrade.md b/docs/how-to/h-upgrade.md new file mode 100644 index 0000000000..1d1c3779ff --- /dev/null +++ b/docs/how-to/h-upgrade.md @@ -0,0 +1,12 @@ +# Upgrade + +Currently, the charm supports PostgreSQL major version 14 only. Therefore, in-place upgrades/rollbacks are not possible for major versions. + +> **Note**: Canonical does not plan to support in-place upgrades across major versions. The new PostgreSQL K8s charm will have to be installed alongside the old one, and the data will be copied from the old installation to the new one. Once support for the next PostgreSQL major version is announced, the appropriate data migration documentation will be published. + +For instructions on carrying out **minor version upgrades**, see the following guides: + +* [Minor upgrade](/t/12095), e.g. PostgreSQL 14.8 -> PostgreSQL 14.9
+(including charm revision bump 42 -> 43). +* [Minor rollback](/t/12096), e.g. PostgreSQL 14.9 -> PostgreSQL 14.8
+(including charm revision rollback 43 -> 42). \ No newline at end of file diff --git a/docs/overview.md b/docs/overview.md index 2d81fee464..93ccfc91cf 100644 --- a/docs/overview.md +++ b/docs/overview.md @@ -39,8 +39,7 @@ PostgreSQL is a trademark or registered trademark of PostgreSQL Global Developme | Level | Path | Navlink | |--------|--------|-------------| -| 1 | tutorial | [Tutorial]() | -| 2 | t-overview | [Overview](/t/9296) | +| 1 | tutorial | [Tutorial](/t/9296) | | 2 | t-set-up | [1. Set up environment](/t/9297) | | 2 | t-deploy | [2. Deploy PostgreSQL](/t/9298) | | 2 | t-access | [3. Access PostgreSQL](/t/13702) | @@ -49,10 +48,9 @@ PostgreSQL is a trademark or registered trademark of PostgreSQL Global Developme | 2 | t-integrate | [6. Integrate with other applications](/t/9301) | | 2 | t-enable-tls | [7. Enable TLS encryption](/t/9302) | | 2 | t-clean-up | [8. Clean up environment](/t/9303) | -| 1 | how-to | [How to]() | -| 2 | h-deploy | [Deploy]() | +| 1 | how-to | [How-to guides](/t/16767) | +| 2 | h-deploy | [Deploy](/t/16810) | | 3 | h-deploy-canonical-k8s | [Canonical K8s](/t/15937) | -| 3 | h-deploy-microk8s | [MicroK8s](/t/11858) | | 3 | h-deploy-gke | [GKE](/t/11237) | | 3 | h-deploy-eks | [EKS](/t/12106) | | 3 | h-deploy-aks | [AKS](/t/14307) | @@ -60,9 +58,10 @@ PostgreSQL is a trademark or registered trademark of PostgreSQL Global Developme | 3 | h-deploy-terraform | [Terraform](/t/14924) | | 3 | h-deploy-airgapped | [Air-gapped](/t/15691) | | 2 | h-integrate | [Integrate with another application](/t/9594) | -| 2 | h-external-access | [External access](/t/15701) | +| 2 | h-external-access | [External network access](/t/15701) | | 2 | h-scale | [Scale replicas](/t/9592) | | 2 | h-enable-tls | [Enable TLS](/t/9593) | +| 2 | h-enable-plugins-extensions | [Enable plugins/extensions](/t/10907) | | 2 | h-backup | [Back up and restore]() | | 3 | h-configure-s3-aws | [Configure S3 AWS](/t/9595) | | 3 | h-configure-s3-radosgw | [Configure S3 RadosGW](/t/10316) | @@ -74,29 +73,19 @@ PostgreSQL is a trademark or registered trademark of PostgreSQL Global Developme | 3 | h-enable-monitoring | [Enable monitoring](/t/10812) | | 3 | h-enable-alert-rules | [Enable alert rules](/t/12982) | | 3 | h-enable-tracing | [Enable tracing](/t/14786) | -| 2 | h-upgrade | [Minor upgrades]() | +| 2 | h-upgrade | [Upgrade](/t/12092) | | 3 | h-upgrade-minor | [Perform a minor upgrade](/t/12095) | | 3 | h-rollback-minor | [Perform a minor rollback](/t/12096) | | 2 | h-async | [Cross-regional async replication](/t/15413) | | 3 | h-async-set-up | [Set up clusters](/t/13895) | | 3 | h-async-integrate | [Integrate with a client app](/t/13896) | | 3 | h-async-remove-recover | [Remove or recover a cluster](/t/13897) | -| 2 | h-enable-plugins-extensions | [Enable plugins/extensions](/t/10907) | | 2 | h-development| [Development]() | -| 3 | h-development-integrate | [Integrate a database with your charm](/t/11853) | +| 3 | h-development-integrate | [Integrate with your charm](/t/11853) | | 3 | h-migrate-pgdump | [Migrate data via pg_dump](/t/12162) | | 3 | h-migrate-backup-restore | [Migrate data via backup/restore](/t/12161) | -| 1 | reference | [Reference]() | -| 2 | r-overview | [Overview](/t/13977) | -| 2 | r-releases | [Release Notes](/t/11872) | -| 3 | r-revision-462-463 | [Revision 462/463](/t/16008) | -| 3 | r-revision-444-445 | [Revision 444/445](/t/15966) | -| 3 | r-revision-381-382 | [Revision 381/382](/t/15442) | -| 3 | r-revision-280-281 | [Revision 280/281](/t/14068) | -| 3 | r-revision-193
| [Revision 193](/t/13208) | -| 3 | r-revision-177 | [Revision 177](/t/12668) | -| 3 | r-revision-158 | [Revision 158](/t/11874) | -| 3 | r-revision-73 | [Revision 73](/t/11873) | +| 1 | reference | [Reference](/t/13977) | +| 2 | r-releases | [Releases](/t/11872) | | 2 | r-system-requirements | [System requirements](/t/11744) | | 2 | r-software-testing | [Software testing](/t/11774) | | 2 | r-performance | [Performance and resources](/t/11975) | @@ -104,15 +93,15 @@ PostgreSQL is a trademark or registered trademark of PostgreSQL Global Developme | 2 | r-plugins-extensions | [Plugins/extensions](/t/10945) | | 2 | r-alert-rules | [Alert rules](/t/15840) | | 2 | r-contacts | [Contacts](/t/11852) | -| 1 | explanation | [Explanation]() | +| 1 | explanation | [Explanation](/t/16769) | | 2 | e-architecture | [Architecture](/t/11856) | | 2 | e-interfaces-endpoints | [Interfaces/endpoints](/t/10252) | +| 2 | e-connection-pooling| [Connection pooling](/t/15799) | | 2 | e-statuses | [Statuses](/t/11855) | | 2 | e-users | [Users](/t/10843) | | 2 | e-logs | [Logs](/t/12098) | | 2 | e-juju-details | [Juju](/t/11986) | | 2 | e-legacy-charm | [Legacy charm](/t/11013) | -| 2 | e-connection-pooling| [Connection pooling](/t/15799) | | 2 | flowcharts | [Flowcharts]() | | 3 | e-flowchart-charm | [Charm](/t/9305) | | 3 | e-flowchart-peers | [Relations](/t/9306) | @@ -121,8 +110,19 @@ PostgreSQL is a trademark or registered trademark of PostgreSQL Global Developme [/details] - \ No newline at end of file diff --git a/docs/reference/r-overview.md b/docs/reference.md similarity index 85% rename from docs/reference/r-overview.md rename to docs/reference.md index ee8dc154e9..cfdd3ba1c8 100644 --- a/docs/reference/r-overview.md +++ b/docs/reference.md @@ -1,4 +1,4 @@ -# Overview +# Reference The Reference section of our documentation contains pages for technical specifications, APIs, release notes, and other reference material for fast lookup. @@ -6,11 +6,12 @@ The Reference section of our documentation contains pages for technical specific | Page | Description | |---------------------------|---------------------------------------------------| | [Release Notes](/t/11872) | Release notes for major revisions of this charm | -| [Requirements](/t/11744) | Software and hardware requirements | -| [Testing](/t/11774) | Software tests (e.g. smoke, unit, performance...) | +| [System requirements](/t/11744) | Software and hardware requirements | +| [Software testing](/t/11774) | Software tests (e.g. smoke, unit, performance...) | +| [Performance and resources](/t/11975) | Config profiles related to performance | | [Troubleshooting](/t/11854) | Troubleshooting tips and tricks | -| [Profiles](/t/11975) | Config profiles related to performance | | [Plugins/extensions](/t/10945) | Plugins/extensions supported by each charm revision | +| [Alert rules](/t/15840) | Pre-configured Prometheus alert rules | | [Contacts](/t/11852) | Contact information | **In the tabs at the top of the page**, you can find the following automatically generated API references: diff --git a/docs/reference/r-releases.md b/docs/reference/r-releases.md index d391a71c91..a047bf050d 100644 --- a/docs/reference/r-releases.md +++ b/docs/reference/r-releases.md @@ -1,32 +1,29 @@ -# Release Notes +# Releases This page provides high-level overviews of the dependencies and features that are supported by each revision in every stable release. 
To learn more about the different release tracks and channels, see the [Juju documentation about channels](https://juju.is/docs/juju/channel#heading--risk). -To see all releases and commits, check the [Charmed PostgreSQL Releases page on GitHub](https://github.com/canonical/postgresql-k8s-operator/releases). +To see all releases and commits, check the [Charmed PostgreSQL Releases on GitHub](https://github.com/canonical/postgresql-k8s-operator/releases). ## Dependencies and supported features For a given release, this table shows: * The PostgreSQL version packaged inside * The minimum Juju version required to reliably operate **all** features of the release - > This charm still supports older versions of Juju down to 2.9. See the [Juju section of the system requirements](/t/) for more details + > This charm still supports older versions of Juju down to 2.9. See the [system requirements](/t/11744) for more details * Support for specific features -| Revision | PostgreSQL version | Juju version | [TLS encryption](/t/9685)* | [COS monitoring](/t/10600) | [Minor version upgrades](/t/) | [Cross-regional async replication](/t/) | [Point-in-time recovery](/t/) -|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| -| [444], [445] | 14.12 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | ![check] | -| [381], [382] | 14.12 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | ![check] | -| [280], [281] | 14.11 | `3.4.2+` | ![check] | ![check] | ![check] | ![check] | -| [193] | 14.10 | `3.4.2+` | ![check] | ![check] | ![check] | ![check] | -| [177] | 14.9 | `3.1.6+` | | ![check] | ![check] | -| [158] | 14.9 | `3.1.5+` | | ![check] | ![check] | -| [73] | 14.7 | `2.9.32+` | | | - - +| Revision | PostgreSQL version | Juju version | [TLS encryption](/t/9685)* | [COS monitoring](/t/10600) | [Minor version upgrades](/t/12092) | [Cross-regional async replication](/t/15413) | [Point-in-time recovery](/t/9597) | [PITR Timelines](/t/9597) | +|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| [462], [463] | 14.13 | `3.6.1+` | ![check] | ![check] | ![check] | ![check] | ![check] | ![check] | +| [444], [445] | 14.12 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | ![check] | | +| [381], [382] | 14.12 | `3.4.3+` | ![check] | ![check] | ![check] | ![check] | ![check] | | +| [280], [281] | 14.11 | `3.4.2+` | ![check] | ![check] | ![check] | ![check] | | | +| [193] | 14.10 | `3.4.2+` | ![check] | ![check] | ![check] | ![check] | | | +| [177] | 14.9 | `3.1.6+` | | ![check] | ![check] | | | | +| [158] | 14.9 | `3.1.5+` | | ![check] | ![check] | | | | +| [73] | 14.7 | `2.9.32+` | | | | | | | **TLS encryption***: Support for **`v2` or higher** of the [`tls-certificates` interface](https://charmhub.io/tls-certificates-interface/libraries/tls_certificates). This means that you can integrate with [modern TLS charms](https://charmhub.io/topics/security-with-x-509-certificates). @@ -40,24 +37,22 @@ Several [revisions](https://juju.is/docs/sdk/revision) are released simultaneous > If you deploy a specific revision, **you must make sure it matches your base and architecture** via the tables below or with [`juju info`](https://juju.is/docs/juju/juju-info).
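+For example, a revision can be cross-checked from the command line before deploying. A minimal sketch (the revision number and architecture below are illustrative; match them against the tables that follow):
+```shell
+# Show channels, revisions, and supported bases for the charm
+juju info postgresql-k8s
+
+# Deploy a specific revision, pinning the architecture it was built for
+juju deploy postgresql-k8s --channel 14/stable --revision 463 --constraints arch=arm64 --trust
+```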
- +|[462] | ![check] | | ![check] | +|[463] | | ![check] | ![check] | -### Release 444-445 (`14/stable`) +[details=Older releases] +### Release 444-445 | Revision | amd64 | arm64 | Ubuntu 22.04 LTS |:--------:|:-----:|:-----:|:-----:| |[445] | | ![check] | ![check] | |[444] | ![check] | | ![check] | -[details=Older releases] -### Release 381-382 (`14/stable`) +### Release 381-382 | Revision | amd64 | arm64 | Ubuntu 22.04 LTS |:--------:|:-----:|:-----:|:-----:| @@ -101,21 +96,26 @@ Several [revisions](https://juju.is/docs/sdk/revision) are released simultaneous For a list of all plugins supported for each revision, see the reference page [Plugins/extensions](/t/10945). -[note] - Our release notes are an ongoing work in progress. If there is any additional information about releases that you would like to see or suggestions for other improvements, don't hesitate to contact us on [Matrix ](https://matrix.to/#/#charmhub-data-platform:ubuntu.com) or [leave a comment](https://discourse.charmhub.io/t/charmed-postgresql-k8s-reference-release-notes/11872). -[/note] + +> **Note**: Our release notes are an ongoing work in progress. If there is any additional information about releases that you would like to see or suggestions for other improvements, don't hesitate to contact us on [Matrix](https://matrix.to/#/#charmhub-data-platform:ubuntu.com) or [leave a comment](https://discourse.charmhub.io/t/charmed-postgresql-k8s-reference-release-notes/11872). -[445]: /t/15966 -[444]: /t/15966 -[382]: /t/15442 -[381]: /t/15442 -[281]: /t/14068 -[280]: /t/14068 -[193]: /t/13208 -[177]: /t/12668 -[158]: /t/11874 -[73]: /t/11873 +[462]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev462 +[463]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev462 + +[445]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev444 +[444]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev444 + +[382]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev381 +[381]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev381 + +[281]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev280 +[280]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev280 + +[193]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev193 +[177]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev177 +[158]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev158 +[73]: https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev73 [check]: https://img.icons8.com/color/20/checkmark--v1.png \ No newline at end of file diff --git a/docs/reference/r-revision-158.md b/docs/reference/r-revision-158.md deleted file mode 100644 index d89a1324ad..0000000000 --- a/docs/reference/r-revision-158.md +++ /dev/null @@ -1,78 +0,0 @@ ->Reference > Release Notes > [All revisions](/t/11872) > [Revision 158](/t/11874) -# Revision 158 -Wednesday, October 18, 2023 - -Dear community, - -We'd like to announce that Canonical's newest Charmed PostgreSQL operator for Kubernetes has been published in the `14/stable` [channel](https://charmhub.io/postgresql-k8s?channel=14/stable). :tada: - -If you are jumping over several stable revisions, make sure to check [previous release notes](/t/11872) before upgrading to this revision.
- -## Features you can start using today -* [Add Juju 3 support](/t/11744) (Juju 2 is still supported) [[DPE-1758](https://warthogs.atlassian.net/browse/DPE-1758)] -* All secrets are now stored in [Juju secrets](https://juju.is/docs/juju/manage-secrets) [[DPE-1758](https://warthogs.atlassian.net/browse/DPE-1758)] -* Charm [minor upgrades](/t/12095) and [minor rollbacks](/t/12096) [[DPE-1767](https://warthogs.atlassian.net/browse/DPE-1767)] -* [Canonical Observability Stack (COS)](https://charmhub.io/topics/canonical-observability-stack) support [[DPE-1775](https://warthogs.atlassian.net/browse/DPE-1775)] -* [PostgreSQL plugins support](/t/10945) [[DPE-1372](https://warthogs.atlassian.net/browse/DPE-1372)] -* [Profiles configuration](/t/11975) support [[DPE-2656](https://warthogs.atlassian.net/browse/DPE-2656)] -* [Logs rotation](/t/12098) [[DPE-1755](https://warthogs.atlassian.net/browse/DPE-1755)] -* Workload updated to [PostgreSQL 14.9](https://www.postgresql.org/docs/14/release-14-9.html) [[PR#18](https://github.com/canonical/charmed-postgresql-snap/pull/18)] -* Add '`admin`' [extra user role](https://github.com/canonical/postgresql-k8s-operator/pull/201) [[DPE-2167](https://warthogs.atlassian.net/browse/DPE-2167)] -* New charm '[PostgreSQL Test App](https://charmhub.io/postgresql-test-app)' -* New documentation: - * [Architecture (HLD/LLD)](/t/11856) - * [Upgrade section](/t/12092) - * [Release Notes](/t/11872) - * [Requirements](/t/11744) - * [Profiles](/t/11975) - * [Users](/t/10843) - * [Logs](/t/12098) - * [Statuses](/t/11855) - * [Development](/t/11851) - * [Testing reference](/t/11774) - * [Legacy charm](/t/11013) - * [Plugins/extensions](/t/10907), [supported](/t/10945) - * [Juju 2.x vs 3.x hints](/t/11986) - * [Contacts](/t/11852) -* All the functionality from [the previous revisions](/t/11873) - -## Bugfixes - -Canonical Data issues are now public on both [Jira](https://warthogs.atlassian.net/jira/software/c/projects/DPE/issues/) and [GitHub](https://github.com/canonical/postgresql-k8s-operator/issues) platforms.
[GitHub Releases](https://github.com/canonical/postgresql-k8s-operator/releases) provide a detailed list of bugfixes/PRs/Git commits for each revision.
Highlights for the current revision: - -* [DPE-1470](https://warthogs.atlassian.net/browse/DPE-1470), [DPE-2419](https://warthogs.atlassian.net/browse/DPE-2419) Fixed K8s resources cleanup -* [DPE-1584](https://warthogs.atlassian.net/browse/DPE-1584) Backup/restore stabilization bugfixes -* [DPE-2546](https://warthogs.atlassian.net/browse/DPE-2546) Split stanza create and stanza check (backup stabilization) -* [DPE-2626](https://warthogs.atlassian.net/browse/DPE-2626), [DPE-2627](https://warthogs.atlassian.net/browse/DPE-2627) Create bucket once and clear up blocked statuses (backup stabilization) -* [DPE-2657](https://warthogs.atlassian.net/browse/DPE-2657) Fix replication after restore -* [DPE-1590](https://warthogs.atlassian.net/browse/DPE-1590) Fixed deployment on old microk8s (e.g. 1.22) -* [DPE-2193](https://warthogs.atlassian.net/browse/DPE-2193) Fixed databases access to requested db only -* [DPE-1999](https://warthogs.atlassian.net/browse/DPE-1999) Fixed TLS race condition in new relations (stuck in 'awaiting for cluster to start'/'awaiting for member to start') -* [DPE-2338](https://warthogs.atlassian.net/browse/DPE-2338) Use SCRAM by default -* [DPE-2616](https://warthogs.atlassian.net/browse/DPE-2616) Auto-tune profile `production` (mimic defaults of [the legacy charm](/t/11013)) -* [DPE-2569](https://warthogs.atlassian.net/browse/DPE-2569) Set waiting status while extensions are being enabled -* [DPE-2015](https://warthogs.atlassian.net/browse/DPE-2015), [DPE-2044](https://warthogs.atlassian.net/browse/DPE-2044) Add missing zoneinfo - -## Inside the charms -* Charmed PostgreSQL K8s ships the latest PostgreSQL “14.9-0ubuntu0.22.04.1” -* PostgreSQL cluster manager Patroni updated to "3.0.2" -* Backup tools pgBackRest updated to "2.47" -* The Prometheus postgres-exporter is "0.12.1-0ubuntu0.22.04.1~ppa1" -* K8s charms [based on our](https://github.com/orgs/canonical/packages?tab=packages&q=charmed) ROCK OCI (Ubuntu LTS “22.04” - ubuntu:22.04-based) -* Principal charms supports the latest LTS series “22.04” only. -* Subordinate charms support LTS “22.04” and “20.04” only. - -## Technical notes - -* `juju refresh` from the old-stable revision 73 to the current-revision 158 is **NOT** supported!!!
The [upgrade](/t/12092) functionality is new and supported for revision 158+ only! -* Please check [the external components requirements](/t/11744) -* Please check additionally [the previously posted restrictions](/t/11873) -* Ensure [the charm requirements](/t/11744) met - -## Contact us - -Charmed PostgreSQL K8s is an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback. - -* Raise software issues or feature requests on [**GitHub**](https://github.com/canonical/postgresql-k8s-operator/issues/new/choose) -* Report security issues through [**Launchpad**](https://wiki.ubuntu.com/DebuggingSecurity#How%20to%20File) -* Contact the Canonical Data Platform team through our [Matrix](https://matrix.to/#/#charmhub-data-platform:ubuntu.com) channel. \ No newline at end of file diff --git a/docs/reference/r-revision-177.md b/docs/reference/r-revision-177.md deleted file mode 100644 index 279ca83d88..0000000000 --- a/docs/reference/r-revision-177.md +++ /dev/null @@ -1,61 +0,0 @@ ->Reference > Release Notes > [All revisions](/t/11872) > [Revision 177](/t/12668) - -# Revision 177 -January 3, 2024 - -Dear community, - -We'd like to announce that Canonical's newest Charmed PostgreSQL operator for Kubernetes has been published in the `14/stable` [channel](https://charmhub.io/postgresql-k8s?channel=14/stable). :tada: - -If you are jumping over several stable revisions, make sure to check [previous release notes](/t/11875) before upgrading to this revision. - -## Features you can start using today - -* [Core] Updated `Charmed PostgreSQL` ROCK image in ([PR#336](https://github.com/canonical/postgresql-k8s-operator/pull/336))([DPE-3039](https://warthogs.atlassian.net/browse/DPE-3039)): - * `Patroni` updated from 3.0.2 to 3.1.2 - * `Pgbackrest` updated from 2.47 to 2.48 -* [Plugins] [Add 24 new plugins/extension](https://charmhub.io/postgresql-k8s/docs/r-plugins-extensions) in ([PR#294](https://github.com/canonical/postgresql-k8s-operator/pull/294)) -* [Plugins] **NOTE**: extension `plpython3u` is deprecated and will be removed from [list of supported plugins](/t/10945) soon! -* [Config] [Add 29 new configuration options](https://charmhub.io/postgresql-k8s/configure) in ([PR#281](https://github.com/canonical/postgresql-k8s-operator/pull/281))([DPE-1782](https://warthogs.atlassian.net/browse/DPE-1782)) -* [Config] **NOTE:** the config option `profile-limit-memory` is deprecated. Use `profile_limit_memory` (to follow the [naming conventions](https://juju.is/docs/sdk/naming))! 
([PR#348](https://github.com/canonical/postgresql-k8s-operator/pull/348))([DPE-3095](https://warthogs.atlassian.net/browse/DPE-3095)) -* [Charm] Add Juju Secret labels in ([PR#303](https://github.com/canonical/postgresql-k8s-operator/pull/303))([DPE-2838](https://warthogs.atlassian.net/browse/DPE-2838)) -* [Charm] Update Python dependencies in ([PR#315](https://github.com/canonical/postgresql-k8s-operator/pull/315))([PR#318](https://github.com/canonical/postgresql-k8s-operator/pull/318)) -* [DB] Add handling of tables ownership in ([PR#334](https://github.com/canonical/postgresql-k8s-operator/pull/334))([DPE-2740](https://warthogs.atlassian.net/browse/DPE-2740)) -* [[COS](https://charmhub.io/topics/canonical-observability-stack)] Moved Grafana dashboard legends to the bottom of the graph in ([PR#337](https://github.com/canonical/postgresql-k8s-operator/pull/337))([DPE-2622](https://warthogs.atlassian.net/browse/DPE-2622)) -* [CI/CD] Charm migrated to GitHub Data reusable workflow in ([PR#338](https://github.com/canonical/postgresql-k8s-operator/pull/338))([DPE-3064](https://warthogs.atlassian.net/browse/DPE-3064)) -* All the functionality from [the previous revisions](/t/11872) - -## Bugfixes - -Canonica Data issues are now public on both [Jira](https://warthogs.atlassian.net/jira/software/c/projects/DPE/issues/) and [GitHub](https://github.com/canonical/postgresql-k8s-operator/issues) platforms.
[GitHub Releases](https://github.com/canonical/postgresql-k8s-operator/releases) provide a detailed list of bugfixes/PRs/Git commits for each revision. - -* Fixed handle scaling to zero units in ([PR#331](https://github.com/canonical/postgresql-k8s-operator/pull/331))([DPE-2728](https://warthogs.atlassian.net/browse/DPE-2728)) -* Fixed plugins enabling performance by toggling all plugins in one go ([PR#322](https://github.com/canonical/postgresql-k8s-operator/pull/322))([DPE-2903](https://warthogs.atlassian.net/browse/DPE-2903)) -* Fixed enabling extensions when new database is created in ([PR#290](https://github.com/canonical/postgresql-k8s-operator/pull/290))([DPE-2569](https://warthogs.atlassian.net/browse/DPE-2569)) -* Fixed locales availability in ROCK ([PR#291](https://github.com/canonical/postgresql-k8s-operator/pull/291)) - -## Inside the charms - -* Charmed PostgreSQL K8s ships the latest PostgreSQL “14.9-0ubuntu0.22.04.1” -* PostgreSQL cluster manager Patroni updated to "3.2.1" -* Backup tools pgBackRest updated to "2.48" -* The Prometheus postgres-exporter is "0.12.1-0ubuntu0.22.04.1~ppa1" -* K8s charms [based on our](https://github.com/orgs/canonical/packages?tab=packages&q=charmed) ROCK OCI (Ubuntu LTS “22.04” - ubuntu:22.04-based) based on SNAP revision 89 -* Principal charms supports the latest LTS series “22.04” only -* Subordinate charms support LTS “22.04” and “20.04” only - -## Technical notes: - -* Upgrade (`juju refresh`) is possible from this revision 158+ -* Use this operator together with a modern operator "[pgBouncer K8s](https://charmhub.io/pgbouncer-k8s)" -* Please check [the external components requirements](/t/11744) -* Please check additionally [the previously posted restrictions](/t/11872) -* Ensure [the charm requirements](/t/11744) met - -## Contact us - -Charmed PostgreSQL K8s is an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback. - -* Raise software issues or feature requests on [**GitHub**](https://github.com/canonical/postgresql-k8s-operator/issues/new/choose) -* Report security issues through [**Launchpad**](https://wiki.ubuntu.com/DebuggingSecurity#How%20to%20File) -* Contact the Canonical Data Platform team through our [Matrix](https://matrix.to/#/#charmhub-data-platform:ubuntu.com) channel. \ No newline at end of file diff --git a/docs/reference/r-revision-193.md b/docs/reference/r-revision-193.md deleted file mode 100644 index 5a4a4402c7..0000000000 --- a/docs/reference/r-revision-193.md +++ /dev/null @@ -1,55 +0,0 @@ ->Reference > Release Notes > [All revisions](/t/11872) > [Revision 193](/t/13208) -# Revision 193 -March 13, 2024 - -Dear community, - -We'd like to announce that Canonical's newest Charmed PostgreSQL operator for Kubernetes has been published in the `14/stable` [channel](https://charmhub.io/postgresql-k8s?channel=14/stable). :tada: - -If you are jumping over several stable revisions, make sure to check [previous release notes](/t/11872) before upgrading to this revision. - -## Features you can start using today -* [CORE] PostgreSQL upgrade 14.9 -> 14.10. ([DPE-3217](https://warthogs.atlassian.net/browse/DPE-3217)) - * **Note**: It is advisable to REINDEX potentially-affected indexes after installing this update! 
(See [PostgreSQL14.10 changelog](https://changelogs.ubuntu.com/changelogs/pool/main/p/postgresql-14/postgresql-14_14.10-0ubuntu0.22.04.1/changelog)) -* [CORE] Juju 3.1.7+ support ([#2037120](https://bugs.launchpad.net/juju/+bug/2037120)) -* [PLUGINS] pgVector extension/plugin ([DPE-3159](https://warthogs.atlassian.net/browse/DPE-3159)) -* [PLUGINS] New PostGIS plugin ([#363](https://github.com/canonical/postgresql-k8s-operator/pull/363)) -* [PLUGINS] More new plugins - [50 in total](/t/10945) -* [MONITORING] COS Awesome Alert rules ([DPE-3161](https://warthogs.atlassian.net/browse/DPE-3161)) -* [SECURITY] Updated TLS libraries for compatibility with new charms - * [manual-tls-certificates](https://charmhub.io/manual-tls-certificates) - * [self-signed-certificates](https://charmhub.io/self-signed-certificates) - * Any charms compatible with [ tls_certificates_interface.v2.tls_certificates](https://charmhub.io/tls-certificates-interface/libraries/tls_certificates) -* All functionality from [previous revisions](/t/11872) - -## Bugfixes -* Stabilized internal Juju secrets management ([DPE-3199](https://warthogs.atlassian.net/browse/DPE-3199) | [#358](https://github.com/canonical/postgresql-k8s-operator/pull/358)) -* Check system identifier in stanza (backups setup stabilization) ([DPE-3061](https://warthogs.atlassian.net/browse/DPE-3061)) - -Canonical Data issues are now public on both [Jira](https://warthogs.atlassian.net/jira/software/c/projects/DPE/issues/) and [GitHub](https://github.com/canonical/postgresql-k8s-operator/issues) platforms. -[GitHub Releases](https://github.com/canonical/postgresql-k8s-operator/releases) provide a detailed list of bugfixes, PRs, and commits for each revision. - -## Inside the charms - -* Charmed PostgreSQL ships the **PostgreSQL** `14.10-0ubuntu0.22.04.1` -* PostgreSQL cluster manager **Patroni** - `v.3.1.2` -* Backup tools **pgBackRest** - `v.2.48` -* The Prometheus **postgres_exporter** is `0.12.1-0ubuntu0.22.04.1~ppa1` -* This charm uses [ROCK OCI](https://github.com/orgs/canonical/packages?tab=packages&q=charmed) (postgresql-image resource-revision 149) based on SNAP revision 96 -* This charm ships the latest base `Ubuntu LTS 22.04.3` - -## Technical notes - -* Starting with this revision (193+), you can use `juju refresh` to upgrade Charmed PostgreSQL K8s -* It is recommended to use this operator together with modern [Charmed PgBouncer operator](https://charmhub.io/pgbouncer-k8s?channel=1/stable) -* Please check [the external components requirements](/t/11744) -* Please check [the previously posted restrictions](/t/11872) -* Ensure [the charm requirements](/t/11744) met - -## Contact us - -Charmed PostgreSQL K8s is an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback. - -* Raise software issues or feature requests on [**GitHub**](https://github.com/canonical/postgresql-k8s-operator/issues/new/choose) -* Report security issues through [**Launchpad**](https://wiki.ubuntu.com/DebuggingSecurity#How%20to%20File) -* Contact the Canonical Data Platform team through our [Matrix](https://matrix.to/#/#charmhub-data-platform:ubuntu.com) channel. 
\ No newline at end of file diff --git a/docs/reference/r-revision-280-281.md b/docs/reference/r-revision-280-281.md deleted file mode 100644 index 1ccf7bf6e1..0000000000 --- a/docs/reference/r-revision-280-281.md +++ /dev/null @@ -1,99 +0,0 @@ ->Reference > Release Notes > [All revisions](/t/11872) > Revision 280/281 -# Revision 280/281 - -June 28, 2024 - -Dear community, - -Canonical's newest Charmed PostgreSQL K8s operator has been published in the '14/stable' [channel](https://charmhub.io/postgresql-k8s?channel=14/stable) :tada: - -Due to the newly added support for `arm64` architecture, the PostgreSQL charm now releases two revisions simultaneously: -* Revision 281 is built for `amd64` -* Revision 280 is built for for `arm64` - -To make sure you deploy for the right architecture, we recommend setting an [architecture constraint](https://juju.is/docs/juju/constraint#heading--arch) for your entire juju model. - -Otherwise, it can be done at deploy time with the `--constraints` flag: -```shell -juju deploy postgresql-k8s --constraints arch= --trust -``` -where `` can be `amd64` or `arm64`. - -[note] -If you are jumping over several stable revisions, make sure to check [previous release notes](/t/11872) before upgrading to this revision. -[/note] - -## Highlights - -* Upgraded PostgreSQL from v.14.10 → v.14.11 ([PR #432](https://github.com/canonical/postgresql-operator/pull/432)) - * Check the official [PostgreSQL release notes](https://www.postgresql.org/docs/release/14.11/) -* Added support for ARM64 architecture ([PR #408](https://github.com/canonical/postgresql-k8s-operator/pull/408)) -* Added support for cross-regional asynchronous replication ([PR #447](https://github.com/canonical/postgresql-k8s-operator/pull/447)) ([DPE-2897](https://warthogs.atlassian.net/browse/DPE-2897)) - * This feature focuses on disaster recovery by distributing data across different servers. Check our [new how-to guides](https://charmhub.io/postgresql-k8s/docs/h-async-setup) for a walkthrough of the cross-model setup, promotion, switchover, and other details. -* Added support for tracing with Tempo K8s ([PR #497](https://github.com/canonical/postgresql-k8s-operator/pull/497)) - * Check the new guide: [How to enable tracing](https://charmhub.io/postgresql-k8s/docs/h-enable-tracing) -* Released new [Charmed Sysbench operator](https://charmhub.io/sysbench) for easy performance testing - -### Enhancements -* Added timescaledb plugin/extension ([PR #488](https://github.com/canonical/postgresql-k8s-operator/pull/488)) - * See the [Configuration tab]((https://charmhub.io/postgresql-k8s/configuration#plugin_timescaledb_enable)) for all parameters. 
-* Added incremental and differential backup support ([PR #487](https://github.com/canonical/postgresql-k8s-operator/pull/487))([PR #476](https://github.com/canonical/postgresql-k8s-operator/pull/476))([DPE-4464](https://warthogs.atlassian.net/browse/DPE-4464)) - * Check the guide: [How to create and list backups](https://charmhub.io/postgresql-k8s/docs/h-create-backup) -* Added support for disabling the operator ([DPE-2470](https://warthogs.atlassian.net/browse/DPE-2470)) -* Added configuration option for backup retention time ([PR #477](https://github.com/canonical/postgresql-k8s-operator/pull/477))([DPE-4401](https://warthogs.atlassian.net/browse/DPE-4401)) - * See the[ Configuration tab](https://charmhub.io/s3-integrator/configuration?channel=latest/edge#experimental-delete-older-than-days) for all parameters -* Added message to inform users about missing `--trust` flag ([PR #440](https://github.com/canonical/postgresql-k8s-operator/pull/440))([DPE-3885](https://warthogs.atlassian.net/browse/DPE-3885)) -* Added `experimental_max_connections` config option ([PR #500](https://github.com/canonical/postgresql-k8s-operator/pull/500)) -* Introduced a block on legacy roles request (modern interface only) ([PR#391](https://github.com/canonical/postgresql-k8s-operator/pull/391))([DPE-3099](https://warthogs.atlassian.net/browse/DPE-3099)) - -### Bugfixes - -* Fixed large objects ownership ([PR #390](https://github.com/canonical/postgresql-k8s-operator/pull/390))([DPE-3551](https://warthogs.atlassian.net/browse/DPE-3551)) -* Fixed shared buffers validation ([PR #396](https://github.com/canonical/postgresql-k8s-operator/pull/396))([DPE-3594](https://warthogs.atlassian.net/browse/DPE-3594)) -* Fixed handling S3 relation in primary non-leader unit ([PR #375](https://github.com/canonical/postgresql-k8s-operator/pull/375))([DPE-3349](https://warthogs.atlassian.net/browse/DPE-3349)) -* Stabilized SST and network cut tests ([PR #385](https://github.com/canonical/postgresql-k8s-operator/pull/385))([DPE-3473](https://warthogs.atlassian.net/browse/DPE-3473)) -* Fixed pod reconciliation: rerender config/service on pod recreation ([PR#461](https://github.com/canonical/postgresql-k8s-operator/pull/461))([DPE-2671](https://warthogs.atlassian.net/browse/DPE-2671)) -* Addressed main instability sources on backups integration tests ([PR#496](https://github.com/canonical/postgresql-k8s-operator/pull/496))([DPE-4427](https://warthogs.atlassian.net/browse/DPE-4427)) -* Fixed scale up with S3 and TLS relations in ([PR#489](https://github.com/canonical/postgresql-k8s-operator/pull/489))([DPE-4456](https://warthogs.atlassian.net/browse/DPE-4456)) - -Canonical Data issues are now public on both [Jira](https://warthogs.atlassian.net/jira/software/c/projects/DPE/issues/) and [GitHub](https://github.com/canonical/postgresql-k8s-operator/issues). - -For a full list of all changes in this revision, see the [GitHub Release](https://github.com/canonical/postgresql-k8s-operator/releases/tag/rev281). - -## Technical details -This section contains some technical details about the charm's contents and dependencies. Make sure to also check the [system requirements](/t/11744). - -### Packaging -This charm is based on the [`charmed-postgresql` snap](https://snapcraft.io/charmed-postgresql) (pinned revision 113). 
It packages: -* postgresql `v.14.11` - * [`14.11-0ubuntu0.22.04.1`](https://launchpad.net/ubuntu/+source/postgresql-14/14.11-0ubuntu0.22.04.1) -* pgbouncer `v.1.21` - * [`1.21.0-0ubuntu0.22.04.1~ppa1`](https://launchpad.net/~data-platform/+archive/ubuntu/pgbouncer) -* patroni `v.3.1.2 ` - * [`3.1.2-0ubuntu0.22.04.1~ppa2`](https://launchpad.net/~data-platform/+archive/ubuntu/patroni) -* pgBackRest `v.2.48` - * [`2.48-0ubuntu0.22.04.1~ppa1`](https://launchpad.net/~data-platform/+archive/ubuntu/pgbackrest) -* prometheus-postgres-exporter `v.0.12.1` - -### Libraries and interfaces -This charm revision imports the following libraries: - -* **grafana_agent `v0`** for integration with Grafana - * Implements `cos_agent` interface -* **rolling_ops `v0`** for rolling operations across units - * Implements `rolling_op` interface -* **tempo_k8s `v1`, `v2`** for integration with Tempo charm - * Implements `tracing` interface -* **tls_certificates_interface `v2`** for integration with TLS charms - * Implements `tls-certificates` interface - -See the [`/lib/charms` directory on GitHub](https://github.com/canonical/postgresql-k8s-operator/tree/main/lib/charms) for more details about all supported libraries. - -See the [`metadata.yaml` file on GitHub](https://github.com/canonical/postgresql-k8s-operator/blob/main/metadata.yaml#L20-L77) for a full list of supported interfaces - -## Contact us - -Charmed PostgreSQL K8s is an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback. -* Raise software issues or feature requests on [**GitHub**](https://github.com/canonical/postgresql-k8s-operator/issues) -* Report security issues through [**Launchpad**](https://wiki.ubuntu.com/DebuggingSecurity#How%20to%20File) -* Contact the Canonical Data Platform team through our [Matrix](https://matrix.to/#/#charmhub-data-platform:ubuntu.com) channel. \ No newline at end of file diff --git a/docs/reference/r-revision-381-382.md b/docs/reference/r-revision-381-382.md deleted file mode 100644 index 76ad01cbf3..0000000000 --- a/docs/reference/r-revision-381-382.md +++ /dev/null @@ -1,182 +0,0 @@ ->Reference > Release Notes > [All revisions] > Revision 381/382 - -# Revision 381/382 -September 11, 2024 - -Dear community, - -Canonical's newest Charmed PostgreSQL K8s operator has been published in the [14/stable channel]. - -Due to the newly added support for `arm64` architecture, the PostgreSQL K8s charm now releases multiple revisions simultaneously: -* Revision 381 is built for `amd64` on Ubuntu 22.04 LTS (postgresql-image r162) -* Revision 382 is built for `arm64` on Ubuntu 22.04 LTS (postgresql-image r162) - -To make sure you deploy for the right architecture, we recommend setting an [architecture constraint](https://juju.is/docs/juju/constraint#heading--arch) for your entire juju model. - -Otherwise, it can be done at deploy time with the `--constraints` flag: -```shell -juju deploy postgresql-k8s --trust --constraints arch= -``` -where `` can be `amd64` or `arm64`. 
- ---- - -## Highlights - -* Upgraded PostgreSQL from v.14.11 → v.14.12 ([PR #563](https://github.com/canonical/postgresql-k8s-operator/pull/563)) - * Check the official [PostgreSQL release notes](https://www.postgresql.org/docs/release/14.12/) -* Added support for Point In Time Recovery ([PR #554](https://github.com/canonical/postgresql-k8s-operator/pull/554)) ([DPE-4839](https://warthogs.atlassian.net/browse/DPE-4839)) -* Added COS tracing support with [tempo-k8s](https://charmhub.io/tempo-k8s) ([PR #497](https://github.com/canonical/postgresql-k8s-operator/pull/497)) ([DPE-4617](https://warthogs.atlassian.net/browse/DPE-4617)) - -## Features - -* Added user warning when deploying charm with wrong architecture ([PR #613](https://github.com/canonical/postgresql-k8s-operator/pull/613)) ([DPE-4239](https://warthogs.atlassian.net/browse/DPE-4239)) -* Improved backups behavior ([PR #542](https://github.com/canonical/postgresql-k8s-operator/pull/542)) ([DPE-4479](https://warthogs.atlassian.net/browse/DPE-4479)) -* Add libpq's connection string URI format to `uri` field in relation databag ([PR #545](https://github.com/canonical/postgresql-k8s-operator/pull/545)) ([DPE-2278](https://warthogs.atlassian.net/browse/DPE-2278)) -* Changed 'master' to 'primary' in Patroni leader role ([PR #532](https://github.com/canonical/postgresql-k8s-operator/pull/532)) ([DPE-1177](https://warthogs.atlassian.net/browse/DPE-1177)) -* Added password to Patroni's REST API ([PR #661](https://github.com/canonical/postgresql-k8s-operator/pull/661)) ([DPE-5275](https://warthogs.atlassian.net/browse/DPE-5275)) -* Improve pgbackrest logging ([PR #587](https://github.com/canonical/postgresql-k8s-operator/pull/587)) - -## Bugfixes and stability - -* Restart pebble service if it's down ([PR #581](https://github.com/canonical/postgresql-k8s-operator/pull/581)) ([DPE-4806](https://warthogs.atlassian.net/browse/DPE-4806)) -* Switched test app interface ([PR #595](https://github.com/canonical/postgresql-k8s-operator/pull/595)) -* Addeded missing `await` to `invalid_extra_user_roles` integration test + fix check loop ([PR #602](https://github.com/canonical/postgresql-k8s-operator/pull/602)) -* Fixed UTC time zone ([PR #592](https://github.com/canonical/postgresql-k8s-operator/pull/592)) -* Fix PITR test on Juju 2.9 ([PR #596](https://github.com/canonical/postgresql-k8s-operator/pull/596)) ([DPE-4990](https://warthogs.atlassian.net/browse/DPE-4990)) -* Fixed storage ownership ([PR #580](https://github.com/canonical/postgresql-k8s-operator/pull/580)) ([DPE-4227](https://warthogs.atlassian.net/browse/DPE-4227)) -* Fixed get-password action description ([PR #605](https://github.com/canonical/postgresql-k8s-operator/pull/605)) ([DPE-5019](https://warthogs.atlassian.net/browse/DPE-5019)) -* Quick fix for blocked CI ([PR #533](https://github.com/canonical/postgresql-k8s-operator/pull/533)) -* CI stability fixes + slicing tests ([PR #524](https://github.com/canonical/postgresql-k8s-operator/pull/524)) ([DPE-4620](https://warthogs.atlassian.net/browse/DPE-4620)) -* Added test for relations coherence ([PR #505](https://github.com/canonical/postgresql-k8s-operator/pull/505)) -* Addressed test_charm and test_self_healing instabilities ([PR #510](https://github.com/canonical/postgresql-k8s-operator/pull/510)) ([DPE-4594](https://warthogs.atlassian.net/browse/DPE-4594)) -* Split PITR backup test in AWS and GCP ([PR #664](https://github.com/canonical/postgresql-k8s-operator/pull/664)) ([DPE-5244](https://warthogs.atlassian.net/browse/DPE-5244)) 
-* Import JujuVersion from ops.jujuversion instead of ops.model ([PR #640](https://github.com/canonical/postgresql-k8s-operator/pull/640)) -* Don't block on missing Postgresql version ([PR #626](https://github.com/canonical/postgresql-k8s-operator/pull/626)) ([DPE-3562](https://warthogs.atlassian.net/browse/DPE-3562)) -* Run integration tests on arm64 ([PR #478](https://github.com/canonical/postgresql-k8s-operator/pull/478)) -* Improved async replication stability ([PR #526](https://github.com/canonical/postgresql-k8s-operator/pull/526)) ([DPE-4736](https://warthogs.atlassian.net/browse/DPE-4736)) -* Removed deprecated config option `profile-limit-memory` ([PR #608](https://github.com/canonical/postgresql-k8s-operator/pull/608)) -* Pause Patroni in the TLS test ([PR #588](https://github.com/canonical/postgresql-k8s-operator/pull/588)) ([DPE-4533](https://warthogs.atlassian.net/browse/DPE-4533)) -* Enforce Juju versions ([PR #544](https://github.com/canonical/postgresql-k8s-operator/pull/544)) ([DPE-4811](https://warthogs.atlassian.net/browse/DPE-4811)) -* Block charm if plugin disable fails due to dependent objects ([PR #567](https://github.com/canonical/postgresql-k8s-operator/pull/567)) ([DPE-4801](https://warthogs.atlassian.net/browse/DPE-4801)) -* Temporarily disable log forwarding & fix for race in Patroni REST password setup ([PR #663](https://github.com/canonical/postgresql-k8s-operator/pull/663)) -* Use manifest file to check for charm architecture ([PR #665](https://github.com/canonical/postgresql-k8s-operator/pull/665)) ([DPE-4239](https://warthogs.atlassian.net/browse/DPE-4239)) -* Only write app data if leader ([PR #676](https://github.com/canonical/postgresql-k8s-operator/pull/676)) ([DPE-5325](https://warthogs.atlassian.net/browse/DPE-5325)) -* Added log for `fix_leader_annotation` method ([PR #679](https://github.com/canonical/postgresql-k8s-operator/pull/679)) - -## Known limitations - - * The unit action `resume-upgrade` randomly raises a [harmless error message](https://warthogs.atlassian.net/browse/DPE-5420): `terminated`. - * The [charm sysbench](https://charmhub.io/sysbench) may [crash](https://warthogs.atlassian.net/browse/DPE-5436) during a PostgreSQL charm refresh. - * Make sure that [cluster-cluster replication](/t/13895) is requested for the same charm/workload revisions. An automated check is [planned](https://warthogs.atlassian.net/browse/DPE-5419). - * [Contact us](/t/11852) to schedule [a cluster-cluster replication](/t/13895) upgrade with you. - -If you are jumping over several stable revisions, check [previous release notes][All revisions] before upgrading. - -## Requirements and compatibility -This charm revision features the following changes in dependencies: -* (increased) The minimum Juju version required to reliably operate **all** features of the release is `v3.4.5` - > You can upgrade to this revision on Juju `v2.9.50+`, but it will not support newer features like cross-regional asynchronous replication, point-in-time recovery, and modern TLS certificate charm integrations. -* (increased) PostgreSQL version 14.12 - -See the [system requirements] for more details about Juju versions and other software and hardware prerequisites. 
- -### Integration tests -Below are the charm integrations tested with this revision on different Juju environments and architectures: -* Juju `v2.9.50` on `amd64` -* Juju `v3.4.5` on `amd64` and `arm64` - -#### Juju `v2.9.50` on `amd64` - -| Software | Version | -|-----|-----| -| [tls-certificates-operator] | `rev 22`, `legacy/stable` | - -#### Juju `v3.4.5` on `amd64` and `arm64` - -| Software | Version | -|-----|-----| -| [self-signed-certificates] | `rev 155`, `latest/stable` | - -#### All -| Software | Version | -|-----|-----| -| [microk8s] | `v.1.31`, `strict/stable` | -| [indico] | `rev 213` | -| [discourse-k8s] | `rev 124` | -| [data-integrator] | `rev 41` | -| [s3-integrator] | `rev 31` | -| [postgresql-test-app] | `rev 239` | - -See the [`/lib/charms` directory on GitHub] for more details about all supported libraries. - -See the [`metadata.yaml` file on GitHub] for a full list of supported interfaces. - -## Packaging - -This charm is based on the Charmed PostgreSQL K8s [rock image]
-(CharmHub `postgresql-image` resource-revision is 162). It packages: -* [postgresql `v.14.12`] -* [pgbouncer `v.1.21`] -* [patroni `v.3.1.2 `] -* [pgBackRest `v.2.48`] -* [prometheus-postgres-exporter `v.0.12.1`] - -## Dependencies and automations -[details=This section contains a list of updates to libs, dependencies, actions, and workflows.] - -* Updated canonical/charming-actions action to v2.6.3 ([PR #673](https://github.com/canonical/postgresql-k8s-operator/pull/673)) -* Updated data-platform-workflows to v21.0.1 ([PR #660](https://github.com/canonical/postgresql-k8s-operator/pull/660)) -* Updated dependency canonical/microk8s to v1.31 ([PR #632](https://github.com/canonical/postgresql-k8s-operator/pull/632)) -* Updated dependency cryptography to v43.0.1([PR #681](https://github.com/canonical/postgresql-k8s-operator/pull/681)) -* Updated dependency juju/juju to v2.9.50 ([PR #589](https://github.com/canonical/postgresql-k8s-operator/pull/589)) -* Updated dependency juju/juju to v3.4.5 ([PR #599](https://github.com/canonical/postgresql-k8s-operator/pull/599)) -* Updated dependency tenacity to v9 ([PR #600](https://github.com/canonical/postgresql-k8s-operator/pull/600)) -* Updated ghcr.io/canonical/charmed-postgresql:14.12-22.04_edge Docker digest to 7ef86a3 ([PR #655](https://github.com/canonical/postgresql-k8s-operator/pull/655)) -* Updated rock to 14.12 ([PR #563](https://github.com/canonical/postgresql-k8s-operator/pull/563)) -* Switch Jira issue sync from workflow to bot ([PR #636](https://github.com/canonical/postgresql-k8s-operator/pull/636)) -* Use poetry package-mode=false ([PR #594](https://github.com/canonical/postgresql-k8s-operator/pull/594)) -* Updated logging: bump lib and introduce pebble log forwarding ([PR #486](https://github.com/canonical/postgresql-k8s-operator/pull/486)) -* Updated postgresql lib ([PR #546](https://github.com/canonical/postgresql-k8s-operator/pull/546)) -* Bumped coverage ([PR #623](https://github.com/canonical/postgresql-k8s-operator/pull/623)) -* Test service patch lib update ([PR #624](https://github.com/canonical/postgresql-k8s-operator/pull/624)) -[/details] - - -[All revisions]: /t/11872 -[system requirements]: /t/11744 - - -[`/lib/charms` directory on GitHub]: https://github.com/canonical/postgresql-k8s-operator/tree/main/lib/charms -[`metadata.yaml` file on GitHub]: https://github.com/canonical/postgresql-k8s-operator/blob/main/metadata.yaml - - -[14/stable channel]: https://charmhub.io/postgresql?channel=14/stable - - -[`charmed-postgresql` packaging]: https://github.com/canonical/charmed-postgresql-rock -[rock image]: ghcr.io/canonical/charmed-postgresql@sha256:7ef86a352c94e2a664f621a1cc683d7a983fd86e923d98c32b863f717cb1c173 - -[postgresql `v.14.12`]: https://launchpad.net/ubuntu/+source/postgresql-14/14.12-0ubuntu0.22.04.1 -[pgbouncer `v.1.21`]: https://launchpad.net/~data-platform/+archive/ubuntu/pgbouncer -[patroni `v.3.1.2 `]: https://launchpad.net/~data-platform/+archive/ubuntu/patroni -[pgBackRest `v.2.48`]: https://launchpad.net/~data-platform/+archive/ubuntu/pgbackrest -[prometheus-postgres-exporter `v.0.12.1`]: https://launchpad.net/~data-platform/+archive/ubuntu/postgres-exporter - - -[juju]: https://juju.is/docs/juju/ -[lxd]: https://documentation.ubuntu.com/lxd/en/latest/ -[nextcloud]: https://charmhub.io/nextcloud -[mailman3-core]: https://charmhub.io/mailman3-core -[data-integrator]: https://charmhub.io/data-integrator -[s3-integrator]: https://charmhub.io/s3-integrator -[postgresql-test-app]: 
https://charmhub.io/postgresql-test-app -[discourse-k8s]: https://charmhub.io/discourse-k8s -[indico]: https://charmhub.io/indico -[microk8s]: https://charmhub.io/microk8s -[tls-certificates-operator]: https://charmhub.io/tls-certificates-operator -[self-signed-certificates]: https://charmhub.io/self-signed-certificates - - -[amd64]: https://img.shields.io/badge/amd64-darkgreen -[arm64]: https://img.shields.io/badge/arm64-blue \ No newline at end of file diff --git a/docs/reference/r-revision-444-445.md b/docs/reference/r-revision-444-445.md deleted file mode 100644 index b15260258e..0000000000 --- a/docs/reference/r-revision-444-445.md +++ /dev/null @@ -1,30 +0,0 @@ ->Reference > Release Notes > [All revisions] > Revision 444/445 - -# Revision 444/445 (hotfix for 381/382) -12 November 2024 - -Dear community, - -Canonical has released a hotfix for Charmed PostgreSQL K8s operator in the [14/stable channel]: -* Revision 444 is built for `amd64` on Ubuntu 22.04 LTS (postgresql-image r162) -* Revision 445 is built for `arm64` on Ubuntu 22.04 LTS (postgresql-image r162) - -## Highlights - -This is a hotfix release to add Juju 3.6 compatibility for the previous stable [revisions 381/382](/t/15442). - -## Bugfixes and stability - -* Fixed Juju 3.6 support - fixed Pebble 1.12+ compatibility ([DPE-5915](https://warthogs.atlassian.net/browse/DPE-5915)) - -## Known limitations - -See the [Release Notes for Revisions 381/382](/t/15442). - -If you are jumping over several stable revisions, check [previous release notes][All revisions] before upgrading. - - -[All revisions]: /t/11872 -[system requirements]: /t/11744 - -[14/stable channel]: https://charmhub.io/postgresql?channel=14/stable \ No newline at end of file diff --git a/docs/reference/r-revision-462-463.md b/docs/reference/r-revision-462-463.md deleted file mode 100644 index f5e831ca39..0000000000 --- a/docs/reference/r-revision-462-463.md +++ /dev/null @@ -1,151 +0,0 @@ ->Reference > Release Notes > [All revisions] > Revision 462/463 - -[note type=caution] -This page is a work in progress for a **future release**. Please revisit at a later date! -[/note] - -# Revision 462/463 - - -Canonical's newest Charmed PostgreSQL K8s operator has been published in the [14/stable channel]. 
- -Due to the newly added support for `arm64` architecture, the PostgreSQL charm now releases multiple revisions simultaneously: -* Revision is built for `amd64` on Ubuntu 22.04 LTS -* Revision is built for `arm64` on Ubuntu 22.04 LTS - -> See also: [How to perform a minor upgrade] - -### Contents -* [Highlights](#highlights) -* [Features and improvements](#features-and-improvements) -* [Bugfixes and maintenance](#bugfixes-and-maintenance) -* [Known limitations](#known-limitations) -* [Requirements and compatibility](#requirements-and-compatibility) - * [Packaging](#packaging) ---- - -## Highlights -* Added timeline management to point-in-time recovery (PITR) ([PR #716](https://github.com/canonical/postgresql-k8s-operator/pull/716)) ([DPE-5581](https://warthogs.atlassian.net/browse/DPE-5581)) -* Added pgAudit plugin/extension ([PR #688](https://github.com/canonical/postgresql-k8s-operator/pull/688)) ([DPE-5116](https://warthogs.atlassian.net/browse/DPE-5116)) -* Observability stack (COS) improvements - * Polished built-in Grafana dashboard ([PR #733](https://github.com/canonical/postgresql-k8s-operator/pull/733)) ([DPE-4469](https://warthogs.atlassian.net/browse/DPE-4469)) - * Improved COS alert rule descriptions ([PR #727](https://github.com/canonical/postgresql-k8s-operator/pull/727)) ([DPE-5658](https://warthogs.atlassian.net/browse/DPE-5658)) -* Added fully-featured terraform module ([PR #737](https://github.com/canonical/postgresql-k8s-operator/pull/737)) ([DPE-5627](https://warthogs.atlassian.net/browse/DPE-5627)) -* Several S3 improvements ([PR #750](https://github.com/canonical/postgresql-k8s-operator/pull/750)) - -## Features and improvements -* Removed patching of private ops class. ([PR #692](https://github.com/canonical/postgresql-k8s-operator/pull/692)) -* Switched charm libs from `tempo_k8s` to `tempo_coordinator_k8s` and test relay support of tracing traffic through `grafana-agent-k8s` ([PR #725](https://github.com/canonical/postgresql-k8s-operator/pull/725)) -* Added check for low storage space on pgdata volume ([PR #685](https://github.com/canonical/postgresql-k8s-operator/pull/685)) ([DPE-5301](https://warthogs.atlassian.net/browse/DPE-5301)) -* Re-enabled log forwarding ([PR #671](https://github.com/canonical/postgresql-k8s-operator/pull/671)) -* Avoid replication slot deletion ([PR #680](https://github.com/canonical/postgresql-k8s-operator/pull/680)) ([DPE-3887](https://warthogs.atlassian.net/browse/DPE-3887)) -* Added pgBackRest logrotate configuration ([PR #722](https://github.com/canonical/postgresql-k8s-operator/pull/722)) ([DPE-5600](https://warthogs.atlassian.net/browse/DPE-5600)) -* Grant priviledges to non-public schemas ([PR #742](https://github.com/canonical/postgresql-k8s-operator/pull/742)) ([DPE-5387](https://warthogs.atlassian.net/browse/DPE-5387)) -* Added TLS flag + CA to relation databag ([PR #719](https://github.com/canonical/postgresql-k8s-operator/pull/719)) ([DPE-5484](https://warthogs.atlassian.net/browse/DPE-5484)) -* Added warning logs to Patroni reinitialisation ([PR #753](https://github.com/canonical/postgresql-k8s-operator/pull/753)) ([DPE-5712](https://warthogs.atlassian.net/browse/DPE-5712)) -* Reduced pgdate permissions ([PR #759](https://github.com/canonical/postgresql-k8s-operator/pull/759)) ([DPE-5915](https://warthogs.atlassian.net/browse/DPE-5915)) -* Split off new interface client app tests ([PR #761](https://github.com/canonical/postgresql-k8s-operator/pull/761)) -* Temporarily disable log forwarding ([PR 
#757](https://github.com/canonical/postgresql-k8s-operator/pull/757)) -* Changed owner of functions, procedures and aggregates ([PR #773](https://github.com/canonical/postgresql-k8s-operator/pull/773)) -* Only update tls flags on leader ([PR #770](https://github.com/canonical/postgresql-k8s-operator/pull/770)) -* Preload shared libs on normal PG start ([PR #774](https://github.com/canonical/postgresql-k8s-operator/pull/774)) ([DPE-6033](https://warthogs.atlassian.net/browse/DPE-6033)) - -## Bugfixes and maintenance -* Fixed PITR backup test instabilities ([PR #690](https://github.com/canonical/postgresql-k8s-operator/pull/690)) -* Fixed some `postgresql.conf` parameters for hardening ([PR #702](https://github.com/canonical/postgresql-k8s-operator/pull/702)) ([DPE-5511](https://warthogs.atlassian.net/browse/DPE-5511)) -* Fixed event deferring issue with missing S3 relation ([PR #762](https://github.com/canonical/postgresql-k8s-operator/pull/762)) ([DPE-5934](https://warthogs.atlassian.net/browse/DPE-5934)) -* Fixed connection rejection rule in `pg_hba.conf` ([PR #751](https://github.com/canonical/postgresql-k8s-operator/pull/751)) ([DPE-5689](https://warthogs.atlassian.net/browse/DPE-5689)) - -[details=Libraries, testing, and CI] -* [Hotfix] Remove failing tests from CI ([PR #693](https://github.com/canonical/postgresql-k8s-operator/pull/693)) -* Reenable full cluster restart tests ([PR #559](https://github.com/canonical/postgresql-k8s-operator/pull/559)) ([DPE-5327](https://warthogs.atlassian.net/browse/DPE-5327)) -* Reenable label rollback test ([PR #754](https://github.com/canonical/postgresql-k8s-operator/pull/754)) ([DPE-5693](https://warthogs.atlassian.net/browse/DPE-5693)) -* Use more meaningful group naming for multi-group tests ([PR #707](https://github.com/canonical/postgresql-k8s-operator/pull/707)) -* Reenable labelling tests ([PR #728](https://github.com/canonical/postgresql-k8s-operator/pull/728)) -* increase async replication tests coverage ([PR #748](https://github.com/canonical/postgresql-k8s-operator/pull/748)) ([DPE-5662](https://warthogs.atlassian.net/browse/DPE-5662)) -* Run integration tests against Juju 3.6 ([PR #689](https://github.com/canonical/postgresql-k8s-operator/pull/689)) ([DPE-4977](https://warthogs.atlassian.net/browse/DPE-4977)) -* Lock file maintenance Python dependencies ([PR #777](https://github.com/canonical/postgresql-k8s-operator/pull/777)) -* Migrate config .github/renovate.json5 ([PR #769](https://github.com/canonical/postgresql-k8s-operator/pull/769)) -* Switch from tox build wrapper to charmcraft.yaml overrides ([PR #708](https://github.com/canonical/postgresql-k8s-operator/pull/708)) -* Update codecov/codecov-action action to v5 ([PR #771](https://github.com/canonical/postgresql-k8s-operator/pull/771)) -* Update data-platform-workflows to v23.0.5 ([PR #776](https://github.com/canonical/postgresql-k8s-operator/pull/776)) -* Update dependency juju/juju to v2.9.51 ([PR #717](https://github.com/canonical/postgresql-k8s-operator/pull/717)) -* Update dependency juju/juju to v3.4.6 ([PR #720](https://github.com/canonical/postgresql-k8s-operator/pull/720)) -* Update dependency ubuntu to v24 ([PR #711](https://github.com/canonical/postgresql-k8s-operator/pull/711)) -* Update ghcr.io/canonical/charmed-postgresql Docker tag to v14.13 ([PR #658](https://github.com/canonical/postgresql-k8s-operator/pull/658)) -[/details] - -## Known limitations -... 
- - -## Requirements and compatibility -* (no change) Minimum Juju 2 version: `v.2.9.49` -* (no change) Minimum Juju 3 version: `v.3.4.3` - -See the [system requirements] for more details about Juju versions and other software and hardware prerequisites. - -### Integration tests -Below are some of the charm integrations tested with this revision on different Juju environments and architectures: -* Juju `v.2.9.51` on `amd64` -* Juju `v.3.4.6` on `amd64` and `arm64` - -| Software | Revision | Tested on | | -|-----|-----|----|---| -| [postgresql-test-app] | `rev 279` | ![juju-2_amd64] ![juju-3_amd64] | -| | `rev 278` | ![juju-3_arm64] | -| [data-integrator] | `rev 41` | ![juju-2_amd64] ![juju-3_amd64] | -| | `rev 40` | ![juju-3_arm64] | -| [s3-integrator] | `rev 77` | ![juju-2_amd64] ![juju-3_amd64] | -| | `rev 78` | ![juju-3_arm64] | -| [tls-certificates-operator] | `rev 22` | ![juju-2_amd64] | -| [self-signed-certificates] | `rev 155` | ![juju-3_amd64] | -| | `rev 205` | ![juju-3_arm64] | -| [mattermost-k8s] | `rev 27` | ![juju-2_amd64] ![juju-3_amd64] | -| [indico] | `rev 233` | ![juju-2_amd64] ![juju-3_amd64] | -| [redis-k8s] | `rev 7`| ![juju-2_amd64] ![juju-3_amd64] | -| | `rev 38` | ![juju-2_amd64] ![juju-3_amd64] | -| [discourse-k8s] | `rev 173` | ![juju-2_amd64] ![juju-3_amd64] | - -### Packaging -This charm is based on the Charmed PostgreSQL K8s [ROCK ]. It packages: -* [postgresql] `v.14.12` -* [pgbouncer] `v.1.21` -* [patroni] `v.3.1.2 ` -* [pgBackRest] `v.2.53` -* [prometheus-postgres-exporter] `v.0.12.1` - - -[All revisions]: /t/11872 -[system requirements]: /t/11744 -[How to perform a minor upgrade]: /t/12095 - -[juju]: https://juju.is/docs/juju/ -[lxd]: https://documentation.ubuntu.com/lxd/en/latest/ -[nextcloud]: https://charmhub.io/nextcloud -[mailman3-core]: https://charmhub.io/mailman3-core -[data-integrator]: https://charmhub.io/data-integrator -[s3-integrator]: https://charmhub.io/s3-integrator -[postgresql-test-app]: https://charmhub.io/postgresql-test-app -[discourse-k8s]: https://charmhub.io/discourse-k8s -[indico]: https://charmhub.io/indico -[microk8s]: https://charmhub.io/microk8s -[tls-certificates-operator]: https://charmhub.io/tls-certificates-operator -[self-signed-certificates]: https://charmhub.io/self-signed-certificates -[landscape-client]: https://charmhub.io/landscape-client -[ubuntu-advantage]: https://charmhub.io/ubuntu-advantage -[mattermost-k8s]: https://charmhub.io/mattermost-k8s -[redis-k8s]: https://charmhub.io/redis-k8s - -[`/lib/charms` directory on GitHub]: https://github.com/canonical/postgresql-k8s-operator/tree/rev463/lib/charms -[`metadata.yaml` file on GitHub]: https://github.com/canonical/postgresql-k8s-operator/blob/rev463/metadata.yaml - -[postgresql]: https://launchpad.net/ubuntu/+source/postgresql-14/ -[pgbouncer]: https://launchpad.net/~data-platform/+archive/ubuntu/pgbouncer -[patroni]: https://launchpad.net/~data-platform/+archive/ubuntu/patroni -[pgBackRest]: https://launchpad.net/~data-platform/+archive/ubuntu/pgbackrest -[prometheus-postgres-exporter]: https://launchpad.net/~data-platform/+archive/ubuntu/postgres-exporter - -[juju-2_amd64]: https://img.shields.io/badge/Juju_2.9.51-amd64-darkgreen?labelColor=ea7d56 -[juju-3_amd64]: https://img.shields.io/badge/Juju_3.4.6-amd64-darkgreen?labelColor=E95420 -[juju-3_arm64]: https://img.shields.io/badge/Juju_3.4.6-arm64-blue?labelColor=E95420 \ No newline at end of file diff --git a/docs/reference/r-revision-73.md b/docs/reference/r-revision-73.md deleted file mode 100644 index 
767f1a6683..0000000000 --- a/docs/reference/r-revision-73.md +++ /dev/null @@ -1,54 +0,0 @@ ->Reference > Release Notes > [All revisions](/t/11872) > [Revision 73](/t/11873) -# Revision 73 -Thursday, April 20, 2023 - -Dear community, - -We'd like to announce that Canonical's newest Charmed PostgreSQL operator for Kubernetes has been published in the `14/stable` [channel](https://charmhub.io/postgresql-k8s?channel=14/stable). :tada: - -If you are jumping over several stable revisions, make sure to check [previous release notes](/t/11872) before upgrading to this revision. - -## Features you can start using today - -* Deploying on Kubernetes (tested with MicroK8s, GKE) -* Scaling up/down in one simple juju command -* HA using [Patroni](https://github.com/zalando/patroni) -* Full backups and restores are supported when using any S3-compatible storage -* TLS support (using “[tls-certificates](https://charmhub.io/tls-certificates-operator)” operator) -* DB access outside of Juju using “[data-integrator](https://charmhub.io/data-integrator)” -* Data import using standard tools e.g. “[PostgreSQL Data Injector](https://charmhub.io/postgresql-data-k8s)” -* [Documentation](https://charmhub.io/postgresql-k8s?channel=14/stable) - -## Inside the charms - -* Charmed PostgreSQL K8s charm ships the latest PostgreSQL “14.7-0ubuntu0.22.04.1” -* K8s charms [based on our](https://github.com/orgs/canonical/packages?tab=packages&q=charmed) ROCK OCI (Ubuntu LTS “22.04” - ubuntu:22.04-based) -* Principal charms supports the latest LTS series “22.04” only. -* Subordinate charms support LTS “22.04” and “20.04” only. - -## Technical notes - -Compatibility with legacy charms: - * The new PostgreSQL charm is also a juju interface-compatible replacement for legacy PostgreSQL charms (using legacy interface `pgsql`, via endpoints `db` and `db-admin`). -However, **it is highly recommended to migrate to the modern interface [`postgresql_client`](https://github.com/canonical/charm-relation-interfaces)** (endpoint `database`). - * Please [contact us](#heading--contact) if you are considering migrating from other “legacy” charms not mentioned above. -* Charm PostgreSQL K8s charm follows the SNAP track “14” (through repackaed ROCK/OCI image). -* No “latest” track in use (no surprises in tracking “latest/stable”)! - * Charmed PostgreSQL K8s charms provide [legacy charm](/t/11013) through “latest/stable”. -* Charm lifecycle flowchart diagrams: [PostgreSQL](https://github.com/canonical/postgresql-k8s-operator/tree/main/docs/reference). -* Modern interfaces are well described in “[Interfaces catalogue](https://github.com/canonical/charm-relation-interfaces)” and implemented by '[data-platform-libs](https://github.com/canonical/data-platform-libs/)'. -* Known limitation: PostgreSQL extensions are not yet supported. - -## Contact us - -Charmed PostgreSQL K8s is an open source project that warmly welcomes community contributions, suggestions, fixes, and constructive feedback. - -* Raise software issues or feature requests on [**GitHub**](https://github.com/canonical/postgresql-k8s-operator/issues/new/choose) -* Report security issues through [**Launchpad**](https://wiki.ubuntu.com/DebuggingSecurity#How%20to%20File) -* Contact the Canonical Data Platform team through our [Matrix](https://matrix.to/#/#charmhub-data-platform:ubuntu.com) channel. 
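The note above recommends migrating from the legacy `pgsql` interface to the modern `postgresql_client` interface (endpoint `database`). As a concrete illustration, here is a minimal requirer-side sketch using the `data_platform_libs` API updated elsewhere in this PR; the charm class, relation name, and database name are illustrative placeholders, not part of this repository:

```python
import logging

from charms.data_platform_libs.v0.data_interfaces import (
    DatabaseCreatedEvent,
    DatabaseRequires,
)
from ops.charm import CharmBase

logger = logging.getLogger(__name__)


class MyAppCharm(CharmBase):
    """Hypothetical client charm requesting a database over `postgresql_client`."""

    def __init__(self, *args):
        super().__init__(*args)
        # Request a database on the modern `database` endpoint instead of the
        # legacy `db`/`db-admin` endpoints.
        self.database = DatabaseRequires(self, relation_name="database", database_name="myapp")
        self.framework.observe(self.database.on.database_created, self._on_database_created)

    def _on_database_created(self, event: DatabaseCreatedEvent) -> None:
        # Credentials and endpoints arrive through the relation databag
        # (or Juju secrets on recent Juju versions); never hard-code them.
        logger.info("Database ready at %s as user %s", event.endpoints, event.username)
```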
- - \ No newline at end of file diff --git a/docs/reference/r-system-requirements.md b/docs/reference/r-system-requirements.md index d531811908..dcc5a63c7f 100644 --- a/docs/reference/r-system-requirements.md +++ b/docs/reference/r-system-requirements.md @@ -11,7 +11,7 @@ The charm supports several Juju releases from [2.9 LTS](https://juju.is/docs/juj | Juju major release | Supported minor versions | Compatible charm revisions | Comment | |:--------|:-----|:-----|:-----| -| ![3.6 LTS] | `3.6.0-beta2` | [280]+ | No known issues, but still in beta. Not recommended for production. | +| ![3.6 LTS] | `3.6.1+` | [444/445]+ | Recommended for production. | | [![3.5]](https://juju.is/docs/juju/roadmap#juju-juju-35) | `3.5.1+` | [280]+ | [Known Juju issue](https://bugs.launchpad.net/juju/+bug/2066517) in `3.5.0` | | [![3.4]](https://juju.is/docs/juju/roadmap#juju-juju-34) | `3.4.3+` | [280]+ | Known Juju issues with previous minor versions | | [![3.3]](https://juju.is/docs/juju/roadmap#juju-juju-33) | `3.3.0+` | from [177] to [193] | No known issues | @@ -61,4 +61,5 @@ At the moment IPv4 is supported only (see more [info](https://warthogs.atlassian [73]: /t/11873 [177]: /t/12668 [193]: /t/13208 -[280]: /t/14068 \ No newline at end of file +[280]: /t/14068 +[444/445]: /t/15966 \ No newline at end of file diff --git a/docs/tutorial/t-overview.md b/docs/tutorial.md similarity index 98% rename from docs/tutorial/t-overview.md rename to docs/tutorial.md index a1419dd5c2..6b0610e210 100644 --- a/docs/tutorial/t-overview.md +++ b/docs/tutorial.md @@ -1,4 +1,4 @@ -# Charmed PostgreSQL K8s Tutorial +# Tutorial This section of our documentation contains comprehensive, hands-on tutorials to help you learn how to deploy Charmed PostgreSQL K8s and become familiar with its available operations. diff --git a/docs/tutorial/t-set-up.md b/docs/tutorial/t-set-up.md index c098958846..f4cba5d7ef 100644 --- a/docs/tutorial/t-set-up.md +++ b/docs/tutorial/t-set-up.md @@ -47,7 +47,7 @@ All necessary components have been pre-installed inside the VM already, like Mic

## Set up Juju

-Let's bootstrap Juju to use the local MicroK8s controller: +Let's bootstrap Juju to use the local MicroK8s controller. We will call it "overlord", but you can give it any name you'd like. ```shell juju bootstrap microk8s overlord ``` diff --git a/lib/charms/data_platform_libs/v0/data_interfaces.py b/lib/charms/data_platform_libs/v0/data_interfaces.py index 3bc2dd8503..9717119030 100644 --- a/lib/charms/data_platform_libs/v0/data_interfaces.py +++ b/lib/charms/data_platform_libs/v0/data_interfaces.py @@ -331,7 +331,7 @@ def _on_topic_requested(self, event: TopicRequestedEvent): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 40 +LIBPATCH = 41 PYDEPS = ["ops>=2.0.0"] @@ -609,7 +609,7 @@ def get_group(self, group: str) -> Optional[SecretGroup]: class CachedSecret: """Locally cache a secret. - The data structure is precisely re-using/simulating as in the actual Secret Storage + The data structure precisely reuses/simulates the one used in the actual Secret Storage """ KNOWN_MODEL_ERRORS = [MODEL_ERRORS["no_label_and_uri"], MODEL_ERRORS["owner_no_refresh"]] @@ -2363,7 +2363,6 @@ def _update_relation_data(self, relation: Relation, data: Dict[str, str]) -> Non def _delete_relation_data(self, relation: Relation, fields: List[str]) -> None: """Delete data available (directly or indirectly -- i.e. secrets) from the relation for owner/this_app.""" if self.secret_fields and self.deleted_label: - _, normal_fields = self._process_secret_fields( relation, self.secret_fields, diff --git a/lib/charms/data_platform_libs/v0/data_models.py b/lib/charms/data_platform_libs/v0/data_models.py index a1dbb8299a..087f6f3c58 100644 --- a/lib/charms/data_platform_libs/v0/data_models.py +++ b/lib/charms/data_platform_libs/v0/data_models.py @@ -168,7 +168,7 @@ class MergedDataBag(ProviderDataBag, RequirerDataBag): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 5 PYDEPS = ["ops>=2.0.0", "pydantic>=1.10,<2"] @@ -209,7 +209,7 @@ def validate_params(cls: Type[T]): """ def decorator( - f: Callable[[CharmBase, ActionEvent, Union[T, ValidationError]], G] + f: Callable[[CharmBase, ActionEvent, Union[T, ValidationError]], G], ) -> Callable[[CharmBase, ActionEvent], G]: @wraps(f) def event_wrapper(self: CharmBase, event: ActionEvent): @@ -287,7 +287,7 @@ def decorator( Optional[Union[UnitModel, ValidationError]], ], G, - ] + ], ) -> Callable[[CharmBase, RelationEvent], G]: @wraps(f) def event_wrapper(self: CharmBase, event: RelationEvent): diff --git a/lib/charms/grafana_k8s/v0/grafana_dashboard.py b/lib/charms/grafana_k8s/v0/grafana_dashboard.py index dfc32ddcb5..d618c79917 100644 --- a/lib/charms/grafana_k8s/v0/grafana_dashboard.py +++ b/lib/charms/grafana_k8s/v0/grafana_dashboard.py @@ -157,7 +157,7 @@ def __init__(self, *args): self._on_dashboards_changed, ) -Dashboards can be retrieved the :meth:`dashboards`: +Dashboards can be retrieved via the `dashboards` method: It will be returned in the format of: @@ -175,7 +175,6 @@ def __init__(self, *args): The consuming charm should decompress the dashboard.
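For illustration, decompression is the inverse of the encoding used on the wire: LZMA compression followed by base64. A minimal sketch (the helper name is hypothetical; `cosl.LZMABase64.decompress`, adopted below, performs the same operation):

```python
import base64
import lzma

def decode_dashboard(encoded: str) -> str:
    # Relation data carries dashboards LZMA-compressed and base64-encoded,
    # so undo the base64 layer first, then the LZMA layer.
    return lzma.decompress(base64.b64decode(encoded.encode("utf-8"))).decode()
```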
""" -import base64 import hashlib import json import logging @@ -187,7 +186,7 @@ def __init__(self, *args): import tempfile import uuid from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Tuple import yaml from ops.charm import ( @@ -209,6 +208,7 @@ def __init__(self, *args): StoredState, ) from ops.model import Relation +from cosl import LZMABase64, DashboardPath40UID # The unique Charmhub library identifier, never change it LIBID = "c49eb9c7dfef40c7b6235ebd67010a3f" @@ -219,7 +219,9 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 36 +LIBPATCH = 42 + +PYDEPS = ["cosl >= 0.0.50"] logger = logging.getLogger(__name__) @@ -415,8 +417,7 @@ def __init__( self.expected_relation_interface = expected_relation_interface self.actual_relation_interface = actual_relation_interface self.message = ( - "The '{}' relation has '{}' as " - "interface rather than the expected '{}'".format( + "The '{}' relation has '{}' as " "interface rather than the expected '{}'".format( relation_name, actual_relation_interface, expected_relation_interface ) ) @@ -544,357 +545,486 @@ def _validate_relation_by_interface_and_direction( raise Exception("Unexpected RelationDirection: {}".format(expected_relation_role)) -def _encode_dashboard_content(content: Union[str, bytes]) -> str: - if isinstance(content, str): - content = bytes(content, "utf-8") +class CharmedDashboard: + """A helper class for handling dashboards on the requirer (Grafana) side.""" - return base64.b64encode(lzma.compress(content)).decode("utf-8") + @classmethod + def _convert_dashboard_fields(cls, content: str, inject_dropdowns: bool = True) -> str: + """Make sure values are present for Juju topology. + Inserts Juju topology variables and selectors into the template, as well as + a variable for Prometheus. + """ + dict_content = json.loads(content) + datasources = {} + existing_templates = False + + template_dropdowns = ( + TOPOLOGY_TEMPLATE_DROPDOWNS + DATASOURCE_TEMPLATE_DROPDOWNS # type: ignore + if inject_dropdowns + else DATASOURCE_TEMPLATE_DROPDOWNS + ) -def _decode_dashboard_content(encoded_content: str) -> str: - return lzma.decompress(base64.b64decode(encoded_content.encode("utf-8"))).decode() + # If the dashboard has __inputs, get the names to replace them. 
These are stripped + # from reactive dashboards in GrafanaDashboardAggregator, but charm authors in + # newer charms may import them directly from the marketplace + if "__inputs" in dict_content: + for field in dict_content["__inputs"]: + if "type" in field and field["type"] == "datasource": + datasources[field["name"]] = field["pluginName"].lower() + del dict_content["__inputs"] + + # If no existing template variables exist, just insert our own + if "templating" not in dict_content: + dict_content["templating"] = {"list": list(template_dropdowns)} # type: ignore + else: + # Otherwise, set a flag so we can go back later + existing_templates = True + for template_value in dict_content["templating"]["list"]: + # Build a list of `datasource_name`: `datasource_type` mappings + # The "query" field is actually "prometheus", "loki", "influxdb", etc + if "type" in template_value and template_value["type"] == "datasource": + datasources[template_value["name"]] = template_value["query"].lower() + + # Put our own variables in the template + for d in template_dropdowns: # type: ignore + if d not in dict_content["templating"]["list"]: + dict_content["templating"]["list"].insert(0, d) + + dict_content = cls._replace_template_fields(dict_content, datasources, existing_templates) + return json.dumps(dict_content) + @classmethod + def _replace_template_fields( # noqa: C901 + cls, dict_content: dict, datasources: dict, existing_templates: bool + ) -> dict: + """Make templated fields get cleaned up afterwards. -def _convert_dashboard_fields(content: str, inject_dropdowns: bool = True) -> str: - """Make sure values are present for Juju topology. + If existing datasource variables are present, try to substitute them. + """ + replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"} + used_replacements = [] # type: List[str] + + # If any existing datasources match types we know, or we didn't find + # any templating variables at all, template them. + if datasources or not existing_templates: + panels = dict_content.get("panels", {}) + if panels: + dict_content["panels"] = cls._template_panels( + panels, replacements, used_replacements, existing_templates, datasources + ) - Inserts Juju topology variables and selectors into the template, as well as - a variable for Prometheus. - """ - dict_content = json.loads(content) - datasources = {} - existing_templates = False - - template_dropdowns = ( - TOPOLOGY_TEMPLATE_DROPDOWNS + DATASOURCE_TEMPLATE_DROPDOWNS # type: ignore - if inject_dropdowns - else DATASOURCE_TEMPLATE_DROPDOWNS - ) + # Find panels nested under rows + rows = dict_content.get("rows", {}) + if rows: + for row_idx, row in enumerate(rows): + if "panels" in row.keys(): + rows[row_idx]["panels"] = cls._template_panels( + row["panels"], + replacements, + used_replacements, + existing_templates, + datasources, + ) + + dict_content["rows"] = rows + + # Finally, go back and pop off the templates we stubbed out + deletions = [] + for tmpl in dict_content["templating"]["list"]: + if tmpl["name"] and tmpl["name"] in used_replacements: + # it might happen that existing template var name is the same as the one we insert (i.e prometheusds or lokids) + # in that case, we want to pop the existing one only. 
+ if tmpl not in DATASOURCE_TEMPLATE_DROPDOWNS: + deletions.append(tmpl) + + for d in deletions: + dict_content["templating"]["list"].remove(d) + + return dict_content + + @classmethod + def _template_panels( + cls, + panels: dict, + replacements: dict, + used_replacements: list, + existing_templates: bool, + datasources: dict, + ) -> dict: + """Iterate through a `panels` object and template it appropriately.""" + # Go through all the panels. If they have a datasource set, AND it's one + # that we can convert to ${lokids} or ${prometheusds}, by stripping off the + # ${} templating and comparing the name to the list we built, replace it, + # otherwise, leave it alone. + # + for panel in panels: + if "datasource" not in panel or not panel.get("datasource"): + continue + if not existing_templates: + datasource = panel.get("datasource") + if isinstance(datasource, str): + if "loki" in datasource: + panel["datasource"] = "${lokids}" + elif "grafana" in datasource: + continue + else: + panel["datasource"] = "${prometheusds}" + elif isinstance(datasource, dict): + # In dashboards exported by Grafana 9, datasource type is dict + dstype = datasource.get("type", "") + if dstype == "loki": + panel["datasource"]["uid"] = "${lokids}" + elif dstype == "prometheus": + panel["datasource"]["uid"] = "${prometheusds}" + else: + logger.debug("Unrecognized datasource type '%s'; skipping", dstype) + continue + else: + logger.error("Unknown datasource format: skipping") + continue + else: + if isinstance(panel["datasource"], str): + if panel["datasource"].lower() in replacements.values(): + # Already a known template variable + continue + # Strip out variable characters and maybe braces + ds = re.sub(r"(\$|\{|\})", "", panel["datasource"]) + + if ds not in datasources.keys(): + # Unknown, non-templated datasource, potentially a Grafana builtin + continue + + replacement = replacements.get(datasources[ds], "") + if replacement: + used_replacements.append(ds) + panel["datasource"] = replacement or panel["datasource"] + elif isinstance(panel["datasource"], dict): + dstype = panel["datasource"].get("type", "") + if panel["datasource"].get("uid", "").lower() in replacements.values(): + # Already a known template variable + continue + # Strip out variable characters and maybe braces + ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", "")) + + if ds not in datasources.keys(): + # Unknown, non-templated datasource, potentially a Grafana builtin + continue + + replacement = replacements.get(datasources[ds], "") + if replacement: + used_replacements.append(ds) + panel["datasource"]["uid"] = replacement + else: + logger.error("Unknown datasource format: skipping") + continue + return panels - # If the dashboard has __inputs, get the names to replace them. 
These are stripped - # from reactive dashboards in GrafanaDashboardAggregator, but charm authors in - # newer charms may import them directly from the marketplace - if "__inputs" in dict_content: - for field in dict_content["__inputs"]: - if "type" in field and field["type"] == "datasource": - datasources[field["name"]] = field["pluginName"].lower() - del dict_content["__inputs"] - - # If no existing template variables exist, just insert our own - if "templating" not in dict_content: - dict_content["templating"] = {"list": list(template_dropdowns)} # type: ignore - else: - # Otherwise, set a flag so we can go back later - existing_templates = True - for template_value in dict_content["templating"]["list"]: - # Build a list of `datasource_name`: `datasource_type` mappings - # The "query" field is actually "prometheus", "loki", "influxdb", etc - if "type" in template_value and template_value["type"] == "datasource": - datasources[template_value["name"]] = template_value["query"].lower() + @classmethod + def _inject_labels(cls, content: str, topology: dict, transformer: "CosTool") -> str: + """Inject Juju topology into panel expressions via CosTool. - # Put our own variables in the template - for d in template_dropdowns: # type: ignore - if d not in dict_content["templating"]["list"]: - dict_content["templating"]["list"].insert(0, d) + A dashboard will have a structure approximating: + { + "__inputs": [], + "templating": { + "list": [ + { + "name": "prometheusds", + "type": "prometheus" + } + ] + }, + "panels": [ + { + "foo": "bar", + "targets": [ + { + "some": "field", + "expr": "up{job="foo"}" + }, + { + "some_other": "field", + "expr": "sum(http_requests_total{instance="$foo"}[5m])} + } + ], + "datasource": "${someds}" + } + ] + } - dict_content = _replace_template_fields(dict_content, datasources, existing_templates) - return json.dumps(dict_content) + `templating` is used elsewhere in this library, but the structure is not rigid. It is + not guaranteed that a panel will actually have any targets (it could be a "spacer" with + no datasource, hence no expression). It could have only one target. It could have multiple + targets. It could have multiple targets of which only one has an `expr` to evaluate. We need + to try to handle all of these concisely. + `cos-tool` (`github.com/canonical/cos-tool` as a Go module in general) + does not know "Grafana-isms", such as using `[$_variable]` to modify the query from the user + interface, so we add placeholders (as `5y`, since it must parse, but a dashboard looking for + five years for a panel query would be unusual). -def _replace_template_fields( # noqa: C901 - dict_content: dict, datasources: dict, existing_templates: bool -) -> dict: - """Make templated fields get cleaned up afterwards. + Args: + content: dashboard content as a string + topology: a dict containing topology values + transformer: a 'CosTool' instance + Returns: + dashboard content with replaced values. + """ + dict_content = json.loads(content) - If existing datasource variables are present, try to substitute them. - """ - replacements = {"loki": "${lokids}", "prometheus": "${prometheusds}"} - used_replacements = [] # type: List[str] - - # If any existing datasources match types we know, or we didn't find - # any templating variables at all, template them. 
- if datasources or not existing_templates: - panels = dict_content.get("panels", {}) - if panels: - dict_content["panels"] = _template_panels( - panels, replacements, used_replacements, existing_templates, datasources - ) + if "panels" not in dict_content.keys(): + return json.dumps(dict_content) - # Find panels nested under rows - rows = dict_content.get("rows", {}) - if rows: - for row_idx, row in enumerate(rows): - if "panels" in row.keys(): - rows[row_idx]["panels"] = _template_panels( - row["panels"], - replacements, - used_replacements, - existing_templates, - datasources, - ) - - dict_content["rows"] = rows - - # Finally, go back and pop off the templates we stubbed out - deletions = [] - for tmpl in dict_content["templating"]["list"]: - if tmpl["name"] and tmpl["name"] in used_replacements: - deletions.append(tmpl) - - for d in deletions: - dict_content["templating"]["list"].remove(d) - - return dict_content - - -def _template_panels( - panels: dict, - replacements: dict, - used_replacements: list, - existing_templates: bool, - datasources: dict, -) -> dict: - """Iterate through a `panels` object and template it appropriately.""" - # Go through all the panels. If they have a datasource set, AND it's one - # that we can convert to ${lokids} or ${prometheusds}, by stripping off the - # ${} templating and comparing the name to the list we built, replace it, - # otherwise, leave it alone. - # - for panel in panels: - if "datasource" not in panel or not panel.get("datasource"): - continue - if not existing_templates: - datasource = panel.get("datasource") - if isinstance(datasource, str): - if "loki" in datasource: - panel["datasource"] = "${lokids}" - elif "grafana" in datasource: - continue - else: - panel["datasource"] = "${prometheusds}" - elif isinstance(datasource, dict): - # In dashboards exported by Grafana 9, datasource type is dict - dstype = datasource.get("type", "") - if dstype == "loki": - panel["datasource"]["uid"] = "${lokids}" - elif dstype == "prometheus": - panel["datasource"]["uid"] = "${prometheusds}" - else: - logger.debug("Unrecognized datasource type '%s'; skipping", dstype) - continue - else: - logger.error("Unknown datasource format: skipping") + # Go through all the panels and inject topology labels + # Panels may have more than one 'target' where the expressions live, so that must be + # accounted for. Additionally, `promql-transform` does not necessarily gracefully handle + # expressions with range queries including variables. Exclude these. + # + # It is not a certainty that the `datasource` field will necessarily reflect the type, so + # operate on all fields. 
+ panels = dict_content["panels"] + topology_with_prefix = {"juju_{}".format(k): v for k, v in topology.items()} + + # We need to use an index so we can insert the changed element back later + for panel_idx, panel in enumerate(panels): + if not isinstance(panel, dict): continue - else: - if isinstance(panel["datasource"], str): - if panel["datasource"].lower() in replacements.values(): - # Already a known template variable - continue - # Strip out variable characters and maybe braces - ds = re.sub(r"(\$|\{|\})", "", panel["datasource"]) - if ds not in datasources.keys(): - # Unknown, non-templated datasource, potentially a Grafana builtin - continue + # Use the index to insert it back in the same location + panels[panel_idx] = cls._modify_panel(panel, topology_with_prefix, transformer) - replacement = replacements.get(datasources[ds], "") - if replacement: - used_replacements.append(ds) - panel["datasource"] = replacement or panel["datasource"] - elif isinstance(panel["datasource"], dict): - dstype = panel["datasource"].get("type", "") - if panel["datasource"].get("uid", "").lower() in replacements.values(): - # Already a known template variable - continue - # Strip out variable characters and maybe braces - ds = re.sub(r"(\$|\{|\})", "", panel["datasource"].get("uid", "")) + return json.dumps(dict_content) - if ds not in datasources.keys(): - # Unknown, non-templated datasource, potentially a Grafana builtin - continue + @classmethod + def _modify_panel(cls, panel: dict, topology: dict, transformer: "CosTool") -> dict: + """Inject Juju topology into panel expressions via CosTool. - replacement = replacements.get(datasources[ds], "") - if replacement: - used_replacements.append(ds) - panel["datasource"]["uid"] = replacement - else: - logger.error("Unknown datasource format: skipping") - continue - return panels + Args: + panel: a dashboard panel as a dict + topology: a dict containing topology values + transformer: a 'CosTool' instance + Returns: + the panel with injected values + """ + if "targets" not in panel.keys(): + return panel + # Pre-compile a regular expression to grab values from inside of [] + range_re = re.compile(r"\[(?P.*?)\]") + # Do the same for any offsets + offset_re = re.compile(r"offset\s+(?P-?\s*[$\w]+)") -def _inject_labels(content: str, topology: dict, transformer: "CosTool") -> str: - """Inject Juju topology into panel expressions via CosTool. + known_datasources = {"${prometheusds}": "promql", "${lokids}": "logql"} - A dashboard will have a structure approximating: - { - "__inputs": [], - "templating": { - "list": [ - { - "name": "prometheusds", - "type": "prometheus" - } - ] - }, - "panels": [ - { - "foo": "bar", - "targets": [ - { - "some": "field", - "expr": "up{job="foo"}" - }, - { - "some_other": "field", - "expr": "sum(http_requests_total{instance="$foo"}[5m])} - } - ], - "datasource": "${someds}" - } - ] - } + targets = panel["targets"] - `templating` is used elsewhere in this library, but the structure is not rigid. It is - not guaranteed that a panel will actually have any targets (it could be a "spacer" with - no datasource, hence no expression). It could have only one target. It could have multiple - targets. It could have multiple targets of which only one has an `expr` to evaluate. We need - to try to handle all of these concisely. 
+ # We need to use an index so we can insert the changed element back later + for idx, target in enumerate(targets): + # If there's no expression, we don't need to do anything + if "expr" not in target.keys(): + continue + expr = target["expr"] - `cos-tool` (`github.com/canonical/cos-tool` as a Go module in general) - does not know "Grafana-isms", such as using `[$_variable]` to modify the query from the user - interface, so we add placeholders (as `5y`, since it must parse, but a dashboard looking for - five years for a panel query would be unusual). + if "datasource" not in panel.keys(): + continue - Args: - content: dashboard content as a string - topology: a dict containing topology values - transformer: a 'CosTool' instance - Returns: - dashboard content with replaced values. - """ - dict_content = json.loads(content) + if isinstance(panel["datasource"], str): + if panel["datasource"] not in known_datasources: + continue + querytype = known_datasources[panel["datasource"]] + elif isinstance(panel["datasource"], dict): + if panel["datasource"]["uid"] not in known_datasources: + continue + querytype = known_datasources[panel["datasource"]["uid"]] + else: + logger.error("Unknown datasource format: skipping") + continue - if "panels" not in dict_content.keys(): - return json.dumps(dict_content) + # Capture all values inside `[]` into a list which we'll iterate over later to + # put them back in-order. Then apply the regex again and replace everything with + # `[5y]` so promql/parser will take it. + # + # Then do it again for offsets + range_values = [m.group("value") for m in range_re.finditer(expr)] + expr = range_re.sub(r"[5y]", expr) + + offset_values = [m.group("value") for m in offset_re.finditer(expr)] + expr = offset_re.sub(r"offset 5y", expr) + # Retrieve the new expression (which may be unchanged if there were no label + # matchers in the expression, or if it was unable to be parsed like logql. It's + # virtually impossible to tell from any datasource "name" in a panel what the + # actual type is without re-implementing a complete dashboard parser, but no + # harm will come from passing invalid promql -- we'll just get the original back. + # + replacement = transformer.inject_label_matchers(expr, topology, querytype) - # Go through all the panels and inject topology labels - # Panels may have more than one 'target' where the expressions live, so that must be - # accounted for. Additionally, `promql-transform` does not necessarily gracefully handle - # expressions with range queries including variables. Exclude these. - # - # It is not a certainty that the `datasource` field will necessarily reflect the type, so - # operate on all fields. - panels = dict_content["panels"] - topology_with_prefix = {"juju_{}".format(k): v for k, v in topology.items()} + if replacement == target["expr"]: + # promql-transform caught an error. Move on + continue - # We need to use an index so we can insert the changed element back later - for panel_idx, panel in enumerate(panels): - if not isinstance(panel, dict): - continue + # Go back and substitute values in [] which were pulled out + # Enumerate with an index... again. The same regex is ok, since it will still match + # `[(.*?)]`, which includes `[5y]`, our placeholder + for i, match in enumerate(range_re.finditer(replacement)): + # Replace one-by-one, starting from the left. We build the string back with + # `str.replace(string_to_replace, replacement_value, count)`.
Limit the count + # to one, since we are going through one-by-one through the list we saved earlier + # in `range_values`. + replacement = replacement.replace( + "[{}]".format(match.group("value")), + "[{}]".format(range_values[i]), + 1, + ) - # Use the index to insert it back in the same location - panels[panel_idx] = _modify_panel(panel, topology_with_prefix, transformer) + for i, match in enumerate(offset_re.finditer(replacement)): + # Replace one-by-one, starting from the left. We build the string back with + # `str.replace(string_to_replace, replacement_value, count)`. Limit the count + # to one, since we are going through one-by-one through the list we saved earlier + # in `range_values`. + replacement = replacement.replace( + "offset {}".format(match.group("value")), + "offset {}".format(offset_values[i]), + 1, + ) - return json.dumps(dict_content) + # Use the index to insert it back in the same location + targets[idx]["expr"] = replacement + panel["targets"] = targets + return panel -def _modify_panel(panel: dict, topology: dict, transformer: "CosTool") -> dict: - """Inject Juju topology into panel expressions via CosTool. + @classmethod + def _content_to_dashboard_object( + cls, + *, + charm_name, + content: str, + juju_topology: dict, + inject_dropdowns: bool = True, + dashboard_alt_uid: Optional[str] = None, + ) -> Dict: + """Helper method for keeping a consistent stored state schema for the dashboard and some metadata. - Args: - panel: a dashboard panel as a dict - topology: a dict containing topology values - transformer: a 'CosTool' instance - Returns: - the panel with injected values - """ - if "targets" not in panel.keys(): - return panel + Args: + charm_name: Charm name (although the aggregator passes the app name). + content: The compressed dashboard. + juju_topology: This is not actually used in the dashboards, but is present to provide a secondary + salt to ensure uniqueness in the dict keys in case individual charm units provide dashboards. + inject_dropdowns: Whether to auto-render topology dropdowns. + dashboard_alt_uid: Alternative uid used for dashboards added programmatically. + """ + ret = { + "charm": charm_name, + "content": content, + "juju_topology": juju_topology if inject_dropdowns else {}, + "inject_dropdowns": inject_dropdowns, + } - # Pre-compile a regular expression to grab values from inside of [] - range_re = re.compile(r"\[(?P.*?)\]") - # Do the same for any offsets - offset_re = re.compile(r"offset\s+(?P-?\s*[$\w]+)") + if dashboard_alt_uid is not None: + ret["dashboard_alt_uid"] = dashboard_alt_uid - known_datasources = {"${prometheusds}": "promql", "${lokids}": "logql"} + return ret - targets = panel["targets"] + @classmethod + def _generate_alt_uid(cls, charm_name: str, key: str) -> str: + """Generate alternative uid for dashboards. - # We need to use an index so we can insert the changed element back later - for idx, target in enumerate(targets): - # If there's no expression, we don't need to do anything - if "expr" not in target.keys(): - continue - expr = target["expr"] + Args: + charm_name: The name of the charm (not app; from metadata). + key: A string used (along with charm.meta.name) to build the hash uid. - if "datasource" not in panel.keys(): - continue + Returns: A hash string. 
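For illustration (hypothetical names): for a charm named "my-charm" and key "file:foo", this hashes "my-charm-file:foo" with shake_256 and returns hexdigest(8), a 16-character hex string.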
+ """ + raw_dashboard_alt_uid = "{}-{}".format(charm_name, key) + return hashlib.shake_256(raw_dashboard_alt_uid.encode("utf-8")).hexdigest(8) - if isinstance(panel["datasource"], str): - if panel["datasource"] not in known_datasources: - continue - querytype = known_datasources[panel["datasource"]] - elif isinstance(panel["datasource"], dict): - if panel["datasource"]["uid"] not in known_datasources: - continue - querytype = known_datasources[panel["datasource"]["uid"]] + @classmethod + def _replace_uid( + cls, *, dashboard_dict: dict, dashboard_path: Path, charm_dir: Path, charm_name: str + ): + # If we're running this from within an aggregator (such as grafana agent), then the uid was + # already rendered there, so we do not want to overwrite it with a uid generated from aggregator's info. + # We overwrite the uid only if it's not a valid "Path40" uid. + if not DashboardPath40UID.is_valid(original_uid := dashboard_dict.get("uid", "")): + rel_path = str( + dashboard_path.relative_to(charm_dir) + if dashboard_path.is_absolute() + else dashboard_path + ) + dashboard_dict["uid"] = DashboardPath40UID.generate(charm_name, rel_path) + logger.debug( + "Processed dashboard '%s': replaced original uid '%s' with '%s'", + dashboard_path, + original_uid, + dashboard_dict["uid"], + ) else: - logger.error("Unknown datasource format: skipping") - continue + logger.debug( + "Processed dashboard '%s': kept original uid '%s'", dashboard_path, original_uid + ) - # Capture all values inside `[]` into a list which we'll iterate over later to - # put them back in-order. Then apply the regex again and replace everything with - # `[5y]` so promql/parser will take it. - # - # Then do it again for offsets - range_values = [m.group("value") for m in range_re.finditer(expr)] - expr = range_re.sub(r"[5y]", expr) - - offset_values = [m.group("value") for m in offset_re.finditer(expr)] - expr = offset_re.sub(r"offset 5y", expr) - # Retrieve the new expression (which may be unchanged if there were no label - # matchers in the expression, or if tt was unable to be parsed like logql. It's - # virtually impossible to tell from any datasource "name" in a panel what the - # actual type is without re-implementing a complete dashboard parser, but no - # harm will some from passing invalid promql -- we'll just get the original back. - # - replacement = transformer.inject_label_matchers(expr, topology, querytype) - - if replacement == target["expr"]: - # promql-tranform caught an error. Move on - continue - - # Go back and substitute values in [] which were pulled out - # Enumerate with an index... again. The same regex is ok, since it will still match - # `[(.*?)]`, which includes `[5y]`, our placeholder - for i, match in enumerate(range_re.finditer(replacement)): - # Replace one-by-one, starting from the left. We build the string back with - # `str.replace(string_to_replace, replacement_value, count)`. Limit the count - # to one, since we are going through one-by-one through the list we saved earlier - # in `range_values`. 
- replacement = replacement.replace( - "[{}]".format(match.group("value")), - "[{}]".format(range_values[i]), - 1, + @classmethod + def _add_tags(cls, dashboard_dict: dict, charm_name: str): + tags: List[str] = dashboard_dict.get("tags", []) + if not any(tag.startswith("charm: ") for tag in tags): + tags.append(f"charm: {charm_name}") + dashboard_dict["tags"] = tags + + @classmethod + def load_dashboards_from_dir( + cls, + *, + dashboards_path: Path, + charm_name: str, + charm_dir: Path, + inject_dropdowns: bool, + juju_topology: dict, + path_filter: Callable[[Path], bool] = lambda p: True, + ) -> dict: + """Load dashboard files from a directory into a mapping from "dashboard id" to a so-called "dashboard object".""" + + # Path.glob uses fnmatch on the backend, which is pretty limited, so use a + # custom function for the filter + def _is_dashboard(p: Path) -> bool: + return ( + p.is_file() + and p.name.endswith((".json", ".json.tmpl", ".tmpl")) + and path_filter(p) ) - for i, match in enumerate(offset_re.finditer(replacement)): - # Replace one-by-one, starting from the left. We build the string back with - # `str.replace(string_to_replace, replacement_value, count)`. Limit the count - # to one, since we are going through one-by-one through the list we saved earlier - # in `range_values`. - replacement = replacement.replace( - "offset {}".format(match.group("value")), - "offset {}".format(offset_values[i]), - 1, + dashboard_templates = {} + + for path in filter(_is_dashboard, Path(dashboards_path).glob("*")): + try: + dashboard_dict = json.loads(path.read_bytes()) + except json.JSONDecodeError as e: + logger.error("Failed to load dashboard '%s': %s", path, e) + continue + if type(dashboard_dict) is not dict: + logger.error( + "Invalid dashboard '%s': expected dict, got %s", path, type(dashboard_dict) + ) + continue + + cls._replace_uid( + dashboard_dict=dashboard_dict, + dashboard_path=path, + charm_dir=charm_dir, + charm_name=charm_name, ) - # Use the index to insert it back in the same location - targets[idx]["expr"] = replacement + cls._add_tags(dashboard_dict=dashboard_dict, charm_name=charm_name) - panel["targets"] = targets - return panel + id = "file:{}".format(path.stem) + dashboard_templates[id] = cls._content_to_dashboard_object( + charm_name=charm_name, + content=LZMABase64.compress(json.dumps(dashboard_dict)), + dashboard_alt_uid=cls._generate_alt_uid(charm_name, id), + inject_dropdowns=inject_dropdowns, + juju_topology=juju_topology, + ) + + return dashboard_templates def _type_convert_stored(obj): @@ -1075,16 +1205,19 @@ def add_dashboard(self, content: str, inject_dropdowns: bool = True) -> None: # that the stored state is there when this unit becomes leader. stored_dashboard_templates: Any = self._stored.dashboard_templates # pyright: ignore - encoded_dashboard = _encode_dashboard_content(content) + encoded_dashboard = LZMABase64.compress(content) # Use as id the first chars of the encoded dashboard, so that # it is predictable across units.
id = "prog:{}".format(encoded_dashboard[-24:-16]) - stored_dashboard_templates[id] = self._content_to_dashboard_object( - encoded_dashboard, inject_dropdowns + stored_dashboard_templates[id] = CharmedDashboard._content_to_dashboard_object( + charm_name=self._charm.meta.name, + content=encoded_dashboard, + dashboard_alt_uid=CharmedDashboard._generate_alt_uid(self._charm.meta.name, id), + inject_dropdowns=inject_dropdowns, + juju_topology=self._juju_topology, ) - stored_dashboard_templates[id]["dashboard_alt_uid"] = self._generate_alt_uid(id) if self._charm.unit.is_leader(): for dashboard_relation in self._charm.model.relations[self._relation_name]: @@ -1127,38 +1260,22 @@ def _update_all_dashboards_from_dir( if dashboard_id.startswith("file:"): del stored_dashboard_templates[dashboard_id] - # Path.glob uses fnmatch on the backend, which is pretty limited, so use a - # custom function for the filter - def _is_dashboard(p: Path) -> bool: - return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl")) - - for path in filter(_is_dashboard, Path(self._dashboards_path).glob("*")): - # path = Path(path) - id = "file:{}".format(path.stem) - stored_dashboard_templates[id] = self._content_to_dashboard_object( - _encode_dashboard_content(path.read_bytes()), inject_dropdowns + stored_dashboard_templates.update( + CharmedDashboard.load_dashboards_from_dir( + dashboards_path=Path(self._dashboards_path), + charm_name=self._charm.meta.name, + charm_dir=self._charm.charm_dir, + inject_dropdowns=inject_dropdowns, + juju_topology=self._juju_topology, ) - stored_dashboard_templates[id]["dashboard_alt_uid"] = self._generate_alt_uid(id) - - self._stored.dashboard_templates = stored_dashboard_templates + ) if self._charm.unit.is_leader(): for dashboard_relation in self._charm.model.relations[self._relation_name]: self._upset_dashboards_on_relation(dashboard_relation) - def _generate_alt_uid(self, key: str) -> str: - """Generate alternative uid for dashboards. - - Args: - key: A string used (along with charm.meta.name) to build the hash uid. - - Returns: A hash string. - """ - raw_dashboard_alt_uid = "{}-{}".format(self._charm.meta.name, key) - return hashlib.shake_256(raw_dashboard_alt_uid.encode("utf-8")).hexdigest(8) - def _reinitialize_dashboard_data(self, inject_dropdowns: bool = True) -> None: - """Triggers a reload of dashboard outside of an eventing workflow. + """Triggers a reload of dashboard outside an eventing workflow. 
Args: inject_dropdowns: a :bool: used to indicate whether topology dropdowns should be added @@ -1231,17 +1348,6 @@ def _upset_dashboards_on_relation(self, relation: Relation) -> None: relation.data[self._charm.app]["dashboards"] = json.dumps(stored_data) - def _content_to_dashboard_object(self, content: str, inject_dropdowns: bool = True) -> Dict: - return { - "charm": self._charm.meta.name, - "content": content, - "juju_topology": self._juju_topology if inject_dropdowns else {}, - "inject_dropdowns": inject_dropdowns, - } - - # This is not actually used in the dashboards, but is present to provide a secondary - # salt to ensure uniqueness in the dict keys in case individual charm units provide - # dashboards @property def _juju_topology(self) -> Dict: return { @@ -1306,7 +1412,7 @@ def __init__( super().__init__(charm, relation_name) self._charm = charm self._relation_name = relation_name - self._tranformer = CosTool(self._charm) + self._transformer = CosTool(self._charm) self._stored.set_default(dashboards={}) # type: ignore @@ -1436,21 +1542,21 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool: # error = None topology = template.get("juju_topology", {}) try: - content = _decode_dashboard_content(template["content"]) + content = LZMABase64.decompress(template["content"]) inject_dropdowns = template.get("inject_dropdowns", True) content = self._manage_dashboard_uid(content, template) - content = _convert_dashboard_fields(content, inject_dropdowns) + content = CharmedDashboard._convert_dashboard_fields(content, inject_dropdowns) if topology: - content = _inject_labels(content, topology, self._tranformer) + content = CharmedDashboard._inject_labels(content, topology, self._transformer) - content = _encode_dashboard_content(content) + content = LZMABase64.compress(content) except lzma.LZMAError as e: error = str(e) relation_has_invalid_dashboards = True except json.JSONDecodeError as e: error = str(e.msg) - logger.warning("Invalid JSON in Grafana dashboard: {}".format(fname)) + logger.warning("Invalid JSON in Grafana dashboard '{}': {}".format(fname, error)) continue # Prepend the relation name and ID to the dashboard ID to avoid clashes with @@ -1506,7 +1612,7 @@ def _render_dashboards_and_signal_changed(self, relation: Relation) -> bool: # if not coerced_data == stored_data: stored_dashboards = self.get_peer_data("dashboards") - stored_dashboards[relation.id] = stored_data + stored_dashboards[str(relation.id)] = stored_data self.set_peer_data("dashboards", stored_dashboards) return True return None # type: ignore @@ -1533,7 +1639,7 @@ def _to_external_object(self, relation_id, dashboard): "id": dashboard["original_id"], "relation_id": relation_id, "charm": dashboard["template"]["charm"], - "content": _decode_dashboard_content(dashboard["content"]), + "content": LZMABase64.decompress(dashboard["content"]), } @property @@ -1570,8 +1676,10 @@ def set_peer_data(self, key: str, data: Any) -> None: def get_peer_data(self, key: str) -> Any: """Retrieve information from the peer data bucket instead of `StoredState`.""" - data = self._charm.peers.data[self._charm.app].get(key, "") # type: ignore[attr-defined] - return json.loads(data) if data else {} + if rel := self._charm.peers: # type: ignore[attr-defined] + data = rel.data[self._charm.app].get(key, "") + return json.loads(data) if data else {} + return {} class GrafanaDashboardAggregator(Object): @@ -1662,8 +1770,11 @@ def _upset_dashboards_on_event(self, event: RelationEvent) -> None: return for id in dashboards: 
- self._stored.dashboard_templates[id] = self._content_to_dashboard_object( # type: ignore - dashboards[id], event + self._stored.dashboard_templates[id] = CharmedDashboard._content_to_dashboard_object( # type: ignore + charm_name=event.app.name, + content=dashboards[id], + inject_dropdowns=True, + juju_topology=self._hybrid_topology(event), ) self._stored.id_mappings[event.app.name] = dashboards # type: ignore @@ -1824,7 +1935,7 @@ def _handle_reactive_dashboards(self, event: RelationEvent) -> Optional[Dict]: from jinja2 import DebugUndefined, Template - content = _encode_dashboard_content( + content = LZMABase64.compress( Template(dash, undefined=DebugUndefined).render(datasource=r"${prometheusds}") # type: ignore ) id = "prog:{}".format(content[-24:-16]) @@ -1855,32 +1966,20 @@ def _maybe_get_builtin_dashboards(self, event: RelationEvent) -> Dict: ) if dashboards_path: - - def is_dashboard(p: Path) -> bool: - return p.is_file() and p.name.endswith((".json", ".json.tmpl", ".tmpl")) - - for path in filter(is_dashboard, Path(dashboards_path).glob("*")): - # path = Path(path) - if event.app.name in path.name: # type: ignore - id = "file:{}".format(path.stem) - builtins[id] = self._content_to_dashboard_object( - _encode_dashboard_content(path.read_bytes()), event - ) + builtins.update( + CharmedDashboard.load_dashboards_from_dir( + dashboards_path=Path(dashboards_path), + charm_name=event.app.name, + charm_dir=self._charm.charm_dir, + inject_dropdowns=True, + juju_topology=self._hybrid_topology(event), + path_filter=lambda path: event.app.name in path.name, + ) + ) return builtins - def _content_to_dashboard_object(self, content: str, event: RelationEvent) -> Dict: - return { - "charm": event.app.name, # type: ignore - "content": content, - "juju_topology": self._juju_topology(event), - "inject_dropdowns": True, - } - - # This is not actually used in the dashboards, but is present to provide a secondary - # salt to ensure uniqueness in the dict keys in case individual charm units provide - # dashboards - def _juju_topology(self, event: RelationEvent) -> Dict: + def _hybrid_topology(self, event: RelationEvent) -> Dict: return { "model": self._charm.model.name, "model_uuid": self._charm.model.uuid, @@ -1999,12 +2098,9 @@ def _get_tool_path(self) -> Optional[Path]: arch = "amd64" if arch == "x86_64" else arch res = "cos-tool-{}".format(arch) try: - path = Path(res).resolve() - path.chmod(0o777) + path = Path(res).resolve(strict=True) return path - except NotImplementedError: - logger.debug("System lacks support for chmod") - except FileNotFoundError: + except (FileNotFoundError, OSError): logger.debug('Could not locate cos-tool at: "{}"'.format(res)) return None diff --git a/lib/charms/loki_k8s/v1/loki_push_api.py b/lib/charms/loki_k8s/v1/loki_push_api.py index d75cb7e199..93aa63c939 100644 --- a/lib/charms/loki_k8s/v1/loki_push_api.py +++ b/lib/charms/loki_k8s/v1/loki_push_api.py @@ -9,7 +9,7 @@ This document explains how to use the two principal objects this library provides: - `LokiPushApiProvider`: This object is meant to be used by any Charmed Operator that needs to -implement the provider side of the `loki_push_api` relation interface. For instance, a Loki charm. +implement the provider side of the `loki_push_api` relation interface: for instance, a Loki charm. The provider side of the relation represents the server side, to which logs are being pushed. 
- `LokiPushApiConsumer`: This object is meant to be used by any Charmed Operator that needs to @@ -533,7 +533,7 @@ def __init__(self, ...): RelationRole, WorkloadEvent, ) -from ops.framework import EventBase, EventSource, Object, ObjectEvents +from ops.framework import BoundEvent, EventBase, EventSource, Object, ObjectEvents from ops.jujuversion import JujuVersion from ops.model import Container, ModelError, Relation from ops.pebble import APIError, ChangeError, Layer, PathError, ProtocolError @@ -546,7 +546,7 @@ def __init__(self, ...): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 13 +LIBPATCH = 15 PYDEPS = ["cosl"] @@ -1543,10 +1543,13 @@ def __init__( alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH, recursive: bool = False, skip_alert_topology_labeling: bool = False, + *, + forward_alert_rules: bool = True, ): super().__init__(charm, relation_name) self._charm = charm self._relation_name = relation_name + self._forward_alert_rules = forward_alert_rules self.topology = JujuTopology.from_charm(charm) try: @@ -1569,7 +1572,8 @@ def _handle_alert_rules(self, relation): alert_rules = ( AlertRules(None) if self._skip_alert_topology_labeling else AlertRules(self.topology) ) - alert_rules.add_path(self._alert_rules_path, recursive=self._recursive) + if self._forward_alert_rules: + alert_rules.add_path(self._alert_rules_path, recursive=self._recursive) alert_rules_as_dict = alert_rules.as_dict() relation.data[self._charm.app]["metadata"] = json.dumps(self.topology.as_dict()) @@ -1617,6 +1621,9 @@ def __init__( alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH, recursive: bool = True, skip_alert_topology_labeling: bool = False, + *, + refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, + forward_alert_rules: bool = True, ): """Construct a Loki charm client. @@ -1642,6 +1649,9 @@ def __init__( alert_rules_path: a string indicating a path where alert rules can be found recursive: Whether to scan for rule files recursively. skip_alert_topology_labeling: whether to skip the alert topology labeling. + forward_alert_rules: a boolean flag to toggle forwarding of charmed alert rules. + refresh_event: an optional bound event or list of bound events which + will be observed to re-set scrape job data (IP address and others) Raises: RelationNotFoundError: If there is no relation in the charm's metadata.yaml @@ -1667,14 +1677,26 @@ def __init__( charm, relation_name, RELATION_INTERFACE_NAME, RelationRole.requires ) super().__init__( - charm, relation_name, alert_rules_path, recursive, skip_alert_topology_labeling + charm, + relation_name, + alert_rules_path, + recursive, + skip_alert_topology_labeling, + forward_alert_rules=forward_alert_rules, ) events = self._charm.on[relation_name] self.framework.observe(self._charm.on.upgrade_charm, self._on_lifecycle_event) + self.framework.observe(self._charm.on.config_changed, self._on_lifecycle_event) self.framework.observe(events.relation_joined, self._on_logging_relation_joined) self.framework.observe(events.relation_changed, self._on_logging_relation_changed) self.framework.observe(events.relation_departed, self._on_logging_relation_departed) + if refresh_event: + if not isinstance(refresh_event, list): + refresh_event = [refresh_event] + for ev in refresh_event: + self.framework.observe(ev, self._on_lifecycle_event) + def _on_lifecycle_event(self, _: HookEvent): """Update require relation data on charm upgrades and other lifecycle events. 
@@ -2550,10 +2572,17 @@ def __init__( alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH, recursive: bool = True, skip_alert_topology_labeling: bool = False, + refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, + forward_alert_rules: bool = True, ): _PebbleLogClient.check_juju_version() super().__init__( - charm, relation_name, alert_rules_path, recursive, skip_alert_topology_labeling + charm, + relation_name, + alert_rules_path, + recursive, + skip_alert_topology_labeling, + forward_alert_rules=forward_alert_rules, ) self._charm = charm self._relation_name = relation_name @@ -2564,6 +2593,12 @@ def __init__( self.framework.observe(on.relation_departed, self._update_logging) self.framework.observe(on.relation_broken, self._update_logging) + if refresh_event: + if not isinstance(refresh_event, list): + refresh_event = [refresh_event] + for ev in refresh_event: + self.framework.observe(ev, self._update_logging) + for container_name in self._charm.meta.containers.keys(): snake_case_container_name = container_name.replace("-", "_") self.framework.observe( diff --git a/lib/charms/observability_libs/v1/kubernetes_service_patch.py b/lib/charms/observability_libs/v1/kubernetes_service_patch.py deleted file mode 100644 index 4d37a38d9f..0000000000 --- a/lib/charms/observability_libs/v1/kubernetes_service_patch.py +++ /dev/null @@ -1,305 +0,0 @@ -# Copyright 2021 Canonical Ltd. -# See LICENSE file for licensing details. - -"""# [DEPRECATED!] KubernetesServicePatch Library. - -The `kubernetes_service_patch` library is DEPRECATED and will be removed in October 2025. - -For patching the Kubernetes service created by Juju during the deployment of a charm, -`ops.Unit.set_ports` functionality should be used instead. - -""" - -import logging -from types import MethodType -from typing import Any, List, Literal, Optional, Union - -from lightkube import ApiError, Client # pyright: ignore -from lightkube.core import exceptions -from lightkube.models.core_v1 import ServicePort, ServiceSpec -from lightkube.models.meta_v1 import ObjectMeta -from lightkube.resources.core_v1 import Service -from lightkube.types import PatchType -from ops import UpgradeCharmEvent -from ops.charm import CharmBase -from ops.framework import BoundEvent, Object - -logger = logging.getLogger(__name__) - -# The unique Charmhub library identifier, never change it -LIBID = "0042f86d0a874435adef581806cddbbb" - -# Increment this major API version when introducing breaking changes -LIBAPI = 1 - -# Increment this PATCH version before using `charmcraft publish-lib` or reset -# to 0 if you are raising the major API version -LIBPATCH = 13 - -ServiceType = Literal["ClusterIP", "LoadBalancer"] - - -class KubernetesServicePatch(Object): - """A utility for patching the Kubernetes service set up by Juju.""" - - def __init__( - self, - charm: CharmBase, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - *, - refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, - ): - """Constructor for KubernetesServicePatch. - - Args: - charm: the charm that is instantiating the library. - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. 
Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - refresh_event: an optional bound event or list of bound events which - will be observed to re-apply the patch (e.g. on port change). - The `install` and `upgrade-charm` events would be observed regardless. - """ - logger.warning( - "The ``kubernetes_service_patch v1`` library is DEPRECATED and will be removed " - "in October 2025. For patching the Kubernetes service created by Juju during " - "the deployment of a charm, ``ops.Unit.set_ports`` functionality should be used instead." - ) - super().__init__(charm, "kubernetes-service-patch") - self.charm = charm - self.service_name = service_name or self._app - # To avoid conflicts with the default Juju service, append "-lb" to the service name. - # The Juju application name is retained for the default service created by Juju. - if self.service_name == self._app and service_type == "LoadBalancer": - self.service_name = f"{self._app}-lb" - self.service_type = service_type - self.service = self._service_object( - ports, - self.service_name, - service_type, - additional_labels, - additional_selectors, - additional_annotations, - ) - - # Make mypy type checking happy that self._patch is a method - assert isinstance(self._patch, MethodType) - # Ensure this patch is applied during the 'install' and 'upgrade-charm' events - self.framework.observe(charm.on.install, self._patch) - self.framework.observe(charm.on.upgrade_charm, self._on_upgrade_charm) - self.framework.observe(charm.on.update_status, self._patch) - # Sometimes Juju doesn't clean-up a manually created LB service, - # so we clean it up ourselves just in case. - self.framework.observe(charm.on.remove, self._remove_service) - - # apply user defined events - if refresh_event: - if not isinstance(refresh_event, list): - refresh_event = [refresh_event] - - for evt in refresh_event: - self.framework.observe(evt, self._patch) - - def _service_object( - self, - ports: List[ServicePort], - service_name: Optional[str] = None, - service_type: ServiceType = "ClusterIP", - additional_labels: Optional[dict] = None, - additional_selectors: Optional[dict] = None, - additional_annotations: Optional[dict] = None, - ) -> Service: - """Creates a valid Service representation. - - Args: - ports: a list of ServicePorts - service_name: allows setting custom name to the patched service. If none given, - application name will be used. - service_type: desired type of K8s service. Default value is in line with ServiceSpec's - default value. - additional_labels: Labels to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_selectors: Selectors to be added to the kubernetes service (by default only - "app.kubernetes.io/name" is set to the service name) - additional_annotations: Annotations to be added to the kubernetes service. - - Returns: - Service: A valid representation of a Kubernetes Service with the correct ports. 
- """ - if not service_name: - service_name = self._app - labels = {"app.kubernetes.io/name": self._app} - if additional_labels: - labels.update(additional_labels) - selector = {"app.kubernetes.io/name": self._app} - if additional_selectors: - selector.update(additional_selectors) - return Service( - apiVersion="v1", - kind="Service", - metadata=ObjectMeta( - namespace=self._namespace, - name=service_name, - labels=labels, - annotations=additional_annotations, # type: ignore[arg-type] - ), - spec=ServiceSpec( - selector=selector, - ports=ports, - type=service_type, - ), - ) - - def _patch(self, _) -> None: - """Patch the Kubernetes service created by Juju to map the correct port. - - Raises: - PatchFailed: if patching fails due to lack of permissions, or otherwise. - """ - try: - client = Client() # pyright: ignore - except exceptions.ConfigError as e: - logger.warning("Error creating k8s client: %s", e) - return - - try: - if self._is_patched(client): - return - if self.service_name != self._app: - if not self.service_type == "LoadBalancer": - self._delete_and_create_service(client) - else: - self._create_lb_service(client) - client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE) - except ApiError as e: - if e.status.code == 403: - logger.error("Kubernetes service patch failed: `juju trust` this application.") - else: - logger.error("Kubernetes service patch failed: %s", str(e)) - else: - logger.info("Kubernetes service '%s' patched successfully", self._app) - - def _delete_and_create_service(self, client: Client): - service = client.get(Service, self._app, namespace=self._namespace) - service.metadata.name = self.service_name # type: ignore[attr-defined] - service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501 - client.delete(Service, self._app, namespace=self._namespace) - client.create(service) - - def _create_lb_service(self, client: Client): - try: - client.get(Service, self.service_name, namespace=self._namespace) - except ApiError: - client.create(self.service) - - def is_patched(self) -> bool: - """Reports if the service patch has been applied. - - Returns: - bool: A boolean indicating if the service patch has been applied. - """ - client = Client() # pyright: ignore - return self._is_patched(client) - - def _is_patched(self, client: Client) -> bool: - # Get the relevant service from the cluster - try: - service = client.get(Service, name=self.service_name, namespace=self._namespace) - except ApiError as e: - if e.status.code == 404 and self.service_name != self._app: - return False - logger.error("Kubernetes service get failed: %s", str(e)) - raise - - # Construct a list of expected ports, should the patch be applied - expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] # type: ignore[attr-defined] - # Construct a list in the same manner, using the fetched service - fetched_ports = [ - (p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined] - ] # noqa: E501 - return expected_ports == fetched_ports - - def _on_upgrade_charm(self, event: UpgradeCharmEvent): - """Handle the upgrade charm event.""" - # If a charm author changed the service type from LB to ClusterIP across an upgrade, we need to delete the previous LB. 
- if self.service_type == "ClusterIP": - - client = Client() # pyright: ignore - - # Define a label selector to find services related to the app - selector: dict[str, Any] = {"app.kubernetes.io/name": self._app} - - # Check if any service of type LoadBalancer exists - services = client.list(Service, namespace=self._namespace, labels=selector) - for service in services: - if ( - not service.metadata - or not service.metadata.name - or not service.spec - or not service.spec.type - ): - logger.warning( - "Service patch: skipping resource with incomplete metadata: %s.", service - ) - continue - if service.spec.type == "LoadBalancer": - client.delete(Service, service.metadata.name, namespace=self._namespace) - logger.info(f"LoadBalancer service {service.metadata.name} deleted.") - - # Continue the upgrade flow normally - self._patch(event) - - def _remove_service(self, _): - """Remove a Kubernetes service associated with this charm. - - Specifically designed to delete the load balancer service created by the charm, since Juju only deletes the - default ClusterIP service and not custom services. - - Returns: - None - - Raises: - ApiError: for deletion errors, excluding when the service is not found (404 Not Found). - """ - client = Client() # pyright: ignore - - try: - client.delete(Service, self.service_name, namespace=self._namespace) - logger.info("The patched k8s service '%s' was deleted.", self.service_name) - except ApiError as e: - if e.status.code == 404: - # Service not found, so no action needed - return - # Re-raise for other statuses - raise - - @property - def _app(self) -> str: - """Name of the current Juju application. - - Returns: - str: A string containing the name of the current Juju application. - """ - return self.charm.app.name - - @property - def _namespace(self) -> str: - """The Kubernetes namespace we're running in. - - Returns: - str: A string containing the name of the current Kubernetes namespace. - """ - with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: - return f.read().strip() diff --git a/lib/charms/postgresql_k8s/v0/postgresql.py b/lib/charms/postgresql_k8s/v0/postgresql.py index bdfef9afbb..9fe1957e4f 100644 --- a/lib/charms/postgresql_k8s/v0/postgresql.py +++ b/lib/charms/postgresql_k8s/v0/postgresql.py @@ -35,7 +35,10 @@ # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 42 +LIBPATCH = 46 + +# Groups to distinguish database permissions +PERMISSIONS_GROUP_ADMIN = "admin" INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles" @@ -187,7 +190,7 @@ def create_database( Identifier(database) ) ) - for user_to_grant_access in [user, "admin", *self.system_users]: + for user_to_grant_access in [user, PERMISSIONS_GROUP_ADMIN, *self.system_users]: cursor.execute( SQL("GRANT ALL PRIVILEGES ON DATABASE {} TO {};").format( Identifier(database), Identifier(user_to_grant_access) @@ -220,7 +223,7 @@ def create_user( user: str, password: Optional[str] = None, admin: bool = False, - extra_user_roles: Optional[str] = None, + extra_user_roles: Optional[List[str]] = None, ) -> None: """Creates a database user. 
@@ -235,16 +238,17 @@ def create_user( admin_role = False roles = privileges = None if extra_user_roles: - extra_user_roles = tuple(extra_user_roles.lower().split(",")) - admin_role = "admin" in extra_user_roles + admin_role = PERMISSIONS_GROUP_ADMIN in extra_user_roles valid_privileges, valid_roles = self.list_valid_privileges_and_roles() roles = [ - role for role in extra_user_roles if role in valid_roles and role != "admin" + role + for role in extra_user_roles + if role in valid_roles and role != PERMISSIONS_GROUP_ADMIN ] privileges = { extra_user_role for extra_user_role in extra_user_roles - if extra_user_role not in roles and extra_user_role != "admin" + if extra_user_role not in roles and extra_user_role != PERMISSIONS_GROUP_ADMIN } invalid_privileges = [ privilege for privilege in privileges if privilege not in valid_privileges @@ -479,6 +483,19 @@ def get_postgresql_timezones(self) -> Set[str]: timezones = cursor.fetchall() return {timezone[0] for timezone in timezones} + def get_postgresql_default_table_access_methods(self) -> Set[str]: + """Returns the PostgreSQL available table access methods. + + Returns: + Set of PostgreSQL table access methods. + """ + with self._connect_to_database( + database_host=self.current_host + ) as connection, connection.cursor() as cursor: + cursor.execute("SELECT amname FROM pg_am WHERE amtype = 't';") + access_methods = cursor.fetchall() + return {access_method[0] for access_method in access_methods} + def get_postgresql_version(self, current_host=True) -> str: """Returns the PostgreSQL version. @@ -566,8 +583,8 @@ def set_up_database(self) -> None: ) ) self.create_user( - "admin", - extra_user_roles="pg_read_all_data,pg_write_all_data", + PERMISSIONS_GROUP_ADMIN, + extra_user_roles=["pg_read_all_data", "pg_write_all_data"], ) cursor.execute("GRANT CONNECT ON DATABASE postgres TO admin;") except psycopg2.Error as e: @@ -649,6 +666,8 @@ def build_postgresql_parameters( for config, value in config_options.items(): # Filter config option not related to PostgreSQL parameters. if not config.startswith(( + "connection", + "cpu", "durability", "instance", "logging", @@ -656,6 +675,8 @@ def build_postgresql_parameters( "optimizer", "request", "response", + "session", + "storage", "vacuum", )): continue diff --git a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py index e3d35c6f30..d1169ef3dc 100644 --- a/lib/charms/prometheus_k8s/v0/prometheus_scrape.py +++ b/lib/charms/prometheus_k8s/v0/prometheus_scrape.py @@ -340,8 +340,8 @@ def _on_scrape_targets_changed(self, event): import yaml from cosl import JujuTopology -from cosl.rules import AlertRules -from ops.charm import CharmBase, RelationRole +from cosl.rules import AlertRules, generic_alert_groups +from ops.charm import CharmBase, RelationJoinedEvent, RelationRole from ops.framework import ( BoundEvent, EventBase, @@ -362,7 +362,7 @@ def _on_scrape_targets_changed(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 47 +LIBPATCH = 50 PYDEPS = ["cosl"] @@ -1309,6 +1309,8 @@ def __init__( refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None, external_url: str = "", lookaside_jobs_callable: Optional[Callable] = None, + *, + forward_alert_rules: bool = True, ): """Construct a metrics provider for a Prometheus charm. @@ -1411,6 +1413,7 @@ def __init__( files. 
Defaults to "./prometheus_alert_rules", resolved relative to the directory hosting the charm entry file. The alert rules are automatically updated on charm upgrade. + forward_alert_rules: a boolean flag to toggle forwarding of charmed alert rules. refresh_event: an optional bound event or list of bound events which will be observed to re-set scrape job data (IP address and others) external_url: an optional argument that represents an external url that @@ -1449,6 +1452,7 @@ def __init__( self._charm = charm self._alert_rules_path = alert_rules_path + self._forward_alert_rules = forward_alert_rules self._relation_name = relation_name # sanitize job configurations to the supported subset of parameters jobs = [] if jobs is None else jobs @@ -1530,7 +1534,11 @@ def set_scrape_job_spec(self, _=None): return alert_rules = AlertRules(query_type="promql", topology=self.topology) - alert_rules.add_path(self._alert_rules_path, recursive=True) + if self._forward_alert_rules: + alert_rules.add_path(self._alert_rules_path, recursive=True) + alert_rules.add( + generic_alert_groups.application_rules, group_name_prefix=self.topology.identifier + ) alert_rules_as_dict = alert_rules.as_dict() for relation in self._charm.model.relations[self._relation_name]: @@ -1776,6 +1784,9 @@ def __init__( relation_names: Optional[dict] = None, relabel_instance=True, resolve_addresses=False, + path_to_own_alert_rules: Optional[str] = None, + *, + forward_alert_rules: bool = True, ): """Construct a `MetricsEndpointAggregator`. @@ -1795,6 +1806,8 @@ def __init__( resolve_addresses: A boolean flag indiccating if the aggregator should attempt to perform DNS lookups of targets and append a `dns_name` label + path_to_own_alert_rules: Optionally supply a path for alert rule files + forward_alert_rules: a boolean flag to toggle forwarding of charmed alert rules """ self._charm = charm @@ -1807,15 +1820,21 @@ def __init__( self._alert_rules_relation = relation_names.get("alert_rules", "prometheus-rules") super().__init__(charm, self._prometheus_relation) + self.topology = JujuTopology.from_charm(charm) + self._stored.set_default(jobs=[], alert_rules=[]) self._relabel_instance = relabel_instance self._resolve_addresses = resolve_addresses + self._forward_alert_rules = forward_alert_rules + # manage Prometheus charm relation events prometheus_events = self._charm.on[self._prometheus_relation] self.framework.observe(prometheus_events.relation_joined, self._set_prometheus_data) + self.path_to_own_alert_rules = path_to_own_alert_rules + # manage list of Prometheus scrape jobs from related scrape targets target_events = self._charm.on[self._target_relation] self.framework.observe(target_events.relation_changed, self._on_prometheus_targets_changed) @@ -1828,7 +1847,7 @@ def __init__( self.framework.observe(alert_rule_events.relation_changed, self._on_alert_rules_changed) self.framework.observe(alert_rule_events.relation_departed, self._on_alert_rules_departed) - def _set_prometheus_data(self, event): + def _set_prometheus_data(self, event: Optional[RelationJoinedEvent] = None): """Ensure every new Prometheus instances is updated. 
Any time a new Prometheus unit joins the relation with @@ -1838,6 +1857,7 @@ def _set_prometheus_data(self, event): if not self._charm.unit.is_leader(): return + # Gather the scrape jobs jobs = [] + _type_convert_stored( self._stored.jobs # pyright: ignore ) # list of scrape jobs, one per relation @@ -1846,6 +1866,7 @@ def _set_prometheus_data(self, event): if targets and relation.app: jobs.append(self._static_scrape_job(targets, relation.app.name)) + # Gather the alert rules groups = [] + _type_convert_stored( self._stored.alert_rules # pyright: ignore ) # list of alert rule groups @@ -1856,9 +1877,23 @@ def _set_prometheus_data(self, event): rules = self._label_alert_rules(unit_rules, appname) group = {"name": self.group_name(appname), "rules": rules} groups.append(group) - - event.relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs) - event.relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups}) + alert_rules = AlertRules(query_type="promql", topology=self.topology) + # Add alert rules from file + if self.path_to_own_alert_rules: + alert_rules.add_path(self.path_to_own_alert_rules, recursive=True) + # Add generic alert rules + alert_rules.add( + generic_alert_groups.application_rules, group_name_prefix=self.topology.identifier + ) + groups.extend(alert_rules.as_dict()["groups"]) + + # Set scrape jobs and alert rules in relation data + relations = [event.relation] if event else self.model.relations[self._prometheus_relation] + for rel in relations: + rel.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs) # type: ignore + rel.data[self._charm.app]["alert_rules"] = json.dumps( # type: ignore + {"groups": groups if self._forward_alert_rules else []} + ) def _on_prometheus_targets_changed(self, event): """Update scrape jobs in response to scrape target changes. 
@@ -2129,7 +2164,9 @@ def set_alert_rule_data(self, name: str, unit_rules: dict, label_rules: bool = T if updated_group["name"] not in [g["name"] for g in groups]: groups.append(updated_group) - relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups}) + relation.data[self._charm.app]["alert_rules"] = json.dumps( + {"groups": groups if self._forward_alert_rules else []} + ) if not _type_convert_stored(self._stored.alert_rules) == groups: # pyright: ignore self._stored.alert_rules = groups @@ -2177,8 +2214,8 @@ def remove_alert_rules(self, group_name: str, unit_name: str) -> None: changed_group["rules"] = rules_kept # type: ignore groups.append(changed_group) - relation.data[self._charm.app]["alert_rules"] = ( - json.dumps({"groups": groups}) if groups else "{}" + relation.data[self._charm.app]["alert_rules"] = json.dumps( + {"groups": groups if self._forward_alert_rules else []} ) if not _type_convert_stored(self._stored.alert_rules) == groups: # pyright: ignore @@ -2364,12 +2401,9 @@ def _get_tool_path(self) -> Optional[Path]: arch = "amd64" if arch == "x86_64" else arch res = "cos-tool-{}".format(arch) try: - path = Path(res).resolve() - path.chmod(0o777) + path = Path(res).resolve(strict=True) return path - except NotImplementedError: - logger.debug("System lacks support for chmod") - except FileNotFoundError: + except (FileNotFoundError, OSError): logger.debug('Could not locate cos-tool at: "{}"'.format(res)) return None diff --git a/lib/charms/rolling_ops/v0/rollingops.py b/lib/charms/rolling_ops/v0/rollingops.py index 57aa9bf352..13b51a3051 100644 --- a/lib/charms/rolling_ops/v0/rollingops.py +++ b/lib/charms/rolling_ops/v0/rollingops.py @@ -63,13 +63,14 @@ def _on_trigger_restart(self, event): juju run-action some-charm/0 some-charm/1 <... some-charm/n> restart ``` -Note that all units that plan to restart must receive the action and emit the aquire +Note that all units that plan to restart must receive the action and emit the acquire event. Any units that do not run their acquire handler will be left out of the rolling restart. (An operator might take advantage of this fact to recover from a failed rolling operation without restarting workloads that were able to successfully restart -- simply omit the successful units from a subsequent run-action call.) """ + import logging from enum import Enum from typing import AnyStr, Callable, Optional @@ -88,7 +89,7 @@ def _on_trigger_restart(self, event): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 7 +LIBPATCH = 8 class LockNoRelationError(Exception): @@ -149,7 +150,6 @@ class Lock: """ def __init__(self, manager, unit=None): - self.relation = manager.model.relations[manager.name][0] if not self.relation: # TODO: defer caller in this case (probably just fired too soon). @@ -246,7 +246,7 @@ def __init__(self, manager): # Gather all the units. relation = manager.model.relations[manager.name][0] - units = [unit for unit in relation.units] + units = list(relation.units) # Plus our unit ... units.append(manager.model.unit) diff --git a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py index cf8def11ac..e2208f756f 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/charm_tracing.py @@ -10,24 +10,28 @@ in real time from the Grafana dashboard the execution flow of your charm. 
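The `rolling_ops` hunk above corrects the docstring's acquire-event paragraph; to make that locking protocol concrete, here is a minimal sketch of the wiring it describes (a sketch only: `RollingOpsManager`, the `restart` relation, and the `acquire_lock` event follow the library's documented usage, while the charm class and workload details are placeholders):

```python
# Minimal sketch of the rolling-restart acquire pattern (placeholder charm).
from charms.rolling_ops.v0.rollingops import RollingOpsManager
from ops.charm import CharmBase


class SomeCharm(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)
        # Lock manager backed by the "restart" peer relation.
        self.restart_manager = RollingOpsManager(
            charm=self, relation="restart", callback=self._restart
        )
        self.framework.observe(self.on.restart_action, self._on_trigger_restart)

    def _restart(self, event):
        """Runs only while this unit holds the lock."""
        ...  # restart the workload here

    def _on_trigger_restart(self, event):
        # Units that never emit acquire_lock are left out of the rolling restart.
        self.on[self.restart_manager.name].acquire_lock.emit()
```

Running the `restart` action on several units then serializes their `_restart` callbacks through the shared lock, which is the behavior the corrected paragraph describes.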
# Quickstart -Fetch the following charm libs (and ensure the minimum version/revision numbers are satisfied): +Fetch the following charm libs: - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing # >= 1.10 - charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing # >= 2.7 + charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.tracing + charmcraft fetch-lib charms.tempo_coordinator_k8s.v0.charm_tracing Then edit your charm code to include: ```python # import the necessary charm libs -from charms.tempo_coordinator_k8s.v0.tracing import TracingEndpointRequirer, charm_tracing_config +from charms.tempo_coordinator_k8s.v0.tracing import ( + TracingEndpointRequirer, + charm_tracing_config, +) from charms.tempo_coordinator_k8s.v0.charm_tracing import charm_tracing + # decorate your charm class with charm_tracing: @charm_tracing( # forward-declare the instance attributes that the instrumentor will look up to obtain the # tempo endpoint and server certificate tracing_endpoint="tracing_endpoint", - server_cert="server_cert" + server_cert="server_cert", ) class MyCharm(CharmBase): _path_to_cert = "/path/to/cert.crt" @@ -37,10 +41,12 @@ class MyCharm(CharmBase): # If you do support TLS, you'll need to make sure that the server cert is copied to this location # and kept up to date so the instrumentor can use it. - def __init__(self, ...): - ... - self.tracing = TracingEndpointRequirer(self, ...) - self.tracing_endpoint, self.server_cert = charm_tracing_config(self.tracing, self._path_to_cert) + def __init__(self, framework): + # ... + self.tracing = TracingEndpointRequirer(self) + self.tracing_endpoint, self.server_cert = charm_tracing_config( + self.tracing, self._path_to_cert + ) ``` # Detailed usage @@ -168,9 +174,10 @@ class MyCharm(CharmBase): ... ``` -## Upgrading from `v0` +## Upgrading from `tempo_k8s.v0` -If you are upgrading from `charm_tracing` v0, you need to take the following steps (assuming you already +If you are upgrading from `tempo_k8s.v0.charm_tracing` (note that since then, the charm library moved to +`tempo_coordinator_k8s.v0.charm_tracing`), you need to take the following steps (assuming you already have the newest version of the library in your charm): 1) If you need the dependency for your tests, add the following dependency to your charm project (or, if your project had a dependency on `opentelemetry-exporter-otlp-proto-grpc` only because @@ -183,7 +190,7 @@ class MyCharm(CharmBase): For example: ``` - from charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm + from charms.tempo_k8s.v0.charm_tracing import trace_charm @trace_charm( tracing_endpoint="my_tracing_endpoint", @@ -225,12 +232,6 @@ def my_tracing_endpoint(self) -> Optional[str]: 3) If you were passing a certificate (str) using `server_cert`, you need to change it to provide an *absolute* path to the certificate file instead. """ -import typing - -from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import ( - encode_spans, -) -from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter def _remove_stale_otel_sdk_packages(): @@ -285,12 +286,15 @@ def _remove_stale_otel_sdk_packages(): # apply hacky patch to remove stale opentelemetry sdk packages on upgrade-charm. # it could be trouble if someone ever decides to implement their own tracer parallel to # ours and before the charm has inited. We assume they won't. +# !!IMPORTANT!! keep all otlp imports UNDER this call. 
_remove_stale_otel_sdk_packages() import functools import inspect import logging import os +import typing +from collections import deque from contextlib import contextmanager from contextvars import Context, ContextVar, copy_context from pathlib import Path @@ -309,6 +313,9 @@ def _remove_stale_otel_sdk_packages(): import opentelemetry import ops +from opentelemetry.exporter.otlp.proto.common._internal.trace_encoder import ( + encode_spans, +) from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace import ReadableSpan, Span, TracerProvider @@ -317,7 +324,11 @@ def _remove_stale_otel_sdk_packages(): SpanExporter, SpanExportResult, ) -from opentelemetry.trace import INVALID_SPAN, Tracer +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.trace import ( + INVALID_SPAN, + Tracer, +) from opentelemetry.trace import get_current_span as otlp_get_current_span from opentelemetry.trace import ( get_tracer, @@ -337,7 +348,7 @@ def _remove_stale_otel_sdk_packages(): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 7 PYDEPS = ["opentelemetry-exporter-otlp-proto-http==1.21.0"] @@ -365,7 +376,9 @@ def _remove_stale_otel_sdk_packages(): BUFFER_DEFAULT_MAX_EVENT_HISTORY_LENGTH = 100 _MiB_TO_B = 2**20 # megabyte to byte conversion rate _OTLP_SPAN_EXPORTER_TIMEOUT = 1 -"""Timeout in seconds that the OTLP span exporter has to push traces to the backend.""" + + +# Timeout in seconds that the OTLP span exporter has to push traces to the backend. class _Buffer: @@ -397,45 +410,75 @@ def save(self, spans: typing.Sequence[ReadableSpan]): if self._max_event_history_length < 1: dev_logger.debug("buffer disabled: max history length < 1") return - - current_history_length = len(self.load()) - new_history_length = current_history_length + len(spans) - if (diff := self._max_event_history_length - new_history_length) < 0: - self.drop(diff) self._save(spans) def _serialize(self, spans: Sequence[ReadableSpan]) -> bytes: # encode because otherwise we can't json-dump them return encode_spans(spans).SerializeToString() + def _prune(self, queue: Sequence[bytes]) -> Sequence[bytes]: + """Prune the queue until it fits in our constraints.""" + n_dropped_spans = 0 + # drop older events if we are past the max history length + overflow = len(queue) - self._max_event_history_length + if overflow > 0: + n_dropped_spans += overflow + logger.warning( + f"charm tracing buffer exceeds max history length ({self._max_event_history_length} events)" + ) + + new_spans = deque(queue[-self._max_event_history_length :]) + + # drop older events if the buffer is too big; all units are bytes + logged_drop = False + target_size = self._max_buffer_size_mib * _MiB_TO_B + current_size = sum(len(span) for span in new_spans) + while current_size > target_size: + current_size -= len(new_spans.popleft()) + n_dropped_spans += 1 + + # only do this once + if not logged_drop: + logger.warning( + f"charm tracing buffer exceeds size limit ({self._max_buffer_size_mib}MiB)." + ) + logged_drop = True + + if n_dropped_spans > 0: + dev_logger.debug( + f"charm tracing buffer overflow: dropped {n_dropped_spans} older spans. " + f"Please increase the buffer limits, or ensure the spans can be flushed." 
+ ) + return new_spans + def _save(self, spans: Sequence[ReadableSpan], replace: bool = False): dev_logger.debug(f"saving {len(spans)} new spans to buffer") old = [] if replace else self.load() - new = self._serialize(spans) + queue = old + [self._serialize(spans)] + new_buffer = self._prune(queue) - try: - # if the buffer exceeds the size limit, we start dropping old spans until it does - - while len((new + self._SPANSEP.join(old))) > (self._max_buffer_size_mib * _MiB_TO_B): - if not old: - # if we've already dropped all spans and still we can't get under the - # size limit, we can't save this span - logger.error( - f"span exceeds total buffer size limit ({self._max_buffer_size_mib}MiB); " - f"buffering FAILED" - ) - return - - old = old[1:] - logger.warning( - f"buffer size exceeds {self._max_buffer_size_mib}MiB; dropping older spans... " - f"Please increase the buffer size, disable buffering, or ensure the spans can be flushed." - ) + if queue and not new_buffer: + # this means that, given our constraints, we are pruning so much that there are no events left. + logger.error( + "No charm events could be buffered into charm traces buffer. Please increase the memory or history size limits." + ) + return - self._db_file.write_bytes(new + self._SPANSEP.join(old)) + try: + self._write(new_buffer) except Exception: logger.exception("error buffering spans") + def _write(self, spans: Sequence[bytes]): + """Write the spans to the db file.""" + # ensure the destination folder exists + db_file_dir = self._db_file.parent + if not db_file_dir.exists(): + dev_logger.info(f"creating buffer dir: {db_file_dir}") + db_file_dir.mkdir(parents=True) + + self._db_file.write_bytes(self._SPANSEP.join(spans)) + def load(self) -> List[bytes]: """Load currently buffered spans from the cache file. @@ -460,8 +503,10 @@ def drop(self, n_spans: Optional[int] = None): else: dev_logger.debug("emptying buffer") new = [] - - self._db_file.write_bytes(self._SPANSEP.join(new)) + try: + self._write(new) + except Exception: + logger.exception("error writing charm traces buffer") def flush(self) -> Optional[bool]: """Export all buffered spans to the given exporter, then clear the buffer. diff --git a/lib/charms/tempo_coordinator_k8s/v0/tracing.py b/lib/charms/tempo_coordinator_k8s/v0/tracing.py index 734a4ca0b2..e1eb44742b 100644 --- a/lib/charms/tempo_coordinator_k8s/v0/tracing.py +++ b/lib/charms/tempo_coordinator_k8s/v0/tracing.py @@ -110,7 +110,7 @@ def __init__(self, *args): # Increment this PATCH version before using `charmcraft publish-lib` or reset # to 0 if you are raising the major API version -LIBPATCH = 4 +LIBPATCH = 6 PYDEPS = ["pydantic"] @@ -129,9 +129,9 @@ def __init__(self, *args): ] RawReceiver = Tuple[ReceiverProtocol, str] -"""Helper type. A raw receiver is defined as a tuple consisting of the protocol name, and the (external, if available), -(secured, if available) resolvable server url. -""" +# Helper type. A raw receiver is defined as a tuple consisting of the protocol name, and the (external, if available), +# (secured, if available) resolvable server url. + BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"} @@ -150,8 +150,7 @@ class TransportProtocolType(str, enum.Enum): "jaeger_thrift_http": TransportProtocolType.http, "jaeger_grpc": TransportProtocolType.grpc, } -"""A mapping between telemetry protocols and their corresponding transport protocol. -""" +# A mapping between telemetry protocols and their corresponding transport protocol. 
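Since the `RawReceiver` and transport-mapping docstrings above are demoted to plain comments, a standalone sketch of what they encode may help (the enum members and the two dict entries mirror this hunk; the enum string values and the example URL are assumptions):

```python
import enum


class TransportProtocolType(str, enum.Enum):
    """Standalone replica of the enum above (string values assumed)."""

    http = "http"
    grpc = "grpc"


# Two entries copied from the mapping shown in this hunk.
receiver_protocol_to_transport_protocol = {
    "jaeger_thrift_http": TransportProtocolType.http,
    "jaeger_grpc": TransportProtocolType.grpc,
}

# A raw receiver pairs a telemetry protocol name with a resolvable server url.
raw_receiver = ("jaeger_grpc", "tempo.example.com:14250")  # hypothetical url
protocol, url = raw_receiver
assert receiver_protocol_to_transport_protocol[protocol] is TransportProtocolType.grpc
```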
class TracingError(Exception): @@ -951,7 +950,6 @@ def charm_tracing_config( proceed with charm tracing (with or without tls, as appropriate) Usage: - If you are using charm_tracing >= v1.9: >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path") @@ -961,24 +959,6 @@ def charm_tracing_config( >>> self.tracing = TracingEndpointRequirer(...) >>> self.my_endpoint, self.cert_path = charm_tracing_config( ... self.tracing, self._cert_path) - - If you are using charm_tracing < v1.9: - >>> from lib.charms.tempo_coordinator_k8s.v0.charm_tracing import trace_charm - >>> from lib.charms.tempo_coordinator_k8s.v0.tracing import charm_tracing_config - >>> @trace_charm(tracing_endpoint="my_endpoint", cert_path="cert_path") - >>> class MyCharm(...): - >>> _cert_path = "/path/to/cert/on/charm/container.crt" - >>> def __init__(self, ...): - >>> self.tracing = TracingEndpointRequirer(...) - >>> self._my_endpoint, self._cert_path = charm_tracing_config( - ... self.tracing, self._cert_path) - >>> @property - >>> def my_endpoint(self): - >>> return self._my_endpoint - >>> @property - >>> def cert_path(self): - >>> return self._cert_path - """ if not endpoint_requirer.is_ready(): return None, None diff --git a/metadata.yaml b/metadata.yaml index 6b4192d904..18e5a4626a 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -28,7 +28,7 @@ resources: postgresql-image: type: oci-image description: OCI image for PostgreSQL - upstream-source: ghcr.io/canonical/charmed-postgresql@sha256:7e41d7f60e45ee2f5463aa9aafcd3c35121423423ee08c26a174b99ad0235b7e # renovate: oci-image tag: 14.15-22.04_edge + upstream-source: ghcr.io/canonical/charmed-postgresql@sha256:42f9b03c31a8e9cc11054e2aced2ecb9a64cffd9cd72cd5ea83e9ee293f477f9 # renovate: oci-image tag: 16.8-24.04_edge peers: database-peers: @@ -45,10 +45,6 @@ provides: optional: true database: interface: postgresql_client - db: - interface: pgsql - db-admin: - interface: pgsql metrics-endpoint: interface: prometheus_scrape grafana-dashboard: @@ -84,9 +80,6 @@ storage: assumes: - k8s-api - any-of: - - all-of: - - juju >= 2.9.49 - - juju < 3 - all-of: - juju >= 3.4.3 - juju < 3.5 diff --git a/poetry.lock b/poetry.lock index 54cf53e099..c624c0fbfc 100644 --- a/poetry.lock +++ b/poetry.lock @@ -17,26 +17,21 @@ allure-python-commons = "2.13.5" pytest = ">=4.5.0" [[package]] -name = "allure-pytest-collection-report" -version = "0.1.0" -description = "" +name = "allure-pytest-default-results" +version = "0.1.2" +description = "Generate default \"unknown\" results to show in Allure Report if test case does not run" optional = false python-versions = ">=3.8" groups = ["integration"] -files = [] -develop = false +files = [ + {file = "allure_pytest_default_results-0.1.2-py3-none-any.whl", hash = "sha256:8dc6c5a5d548661c38111a2890509e794204586fa81cefbe61315fb63996e50c"}, + {file = "allure_pytest_default_results-0.1.2.tar.gz", hash = "sha256:eb6c16aa1c2ede69e653a0ee38094791685eaacb0ac6b2cae5c6da1379dbdbfd"}, +] [package.dependencies] allure-pytest = ">=2.13.5" pytest = "*" -[package.source] -type = "git" -url = "https://github.com/canonical/data-platform-workflows" -reference = "v29.0.5" -resolved_reference = "0e591badc29bcde5039e6bd58da26f34ef065c0f" -subdirectory = "python/pytest_plugins/allure_pytest_collection_report" - [[package]] name = "allure-python-commons" version = "2.13.5" @@ -94,14 +89,14 @@ test = ["astroid 
(>=2,<4)", "pytest", "pytest-cov", "pytest-xdist"] [[package]] name = "attrs" -version = "24.3.0" +version = "25.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" groups = ["charm-libs", "integration"] files = [ - {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, - {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, + {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, + {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, ] [package.extras] @@ -178,18 +173,18 @@ typecheck = ["mypy"] [[package]] name = "boto3" -version = "1.35.70" +version = "1.35.99" description = "The AWS SDK for Python" optional = false python-versions = ">=3.8" groups = ["main", "integration"] files = [ - {file = "boto3-1.35.70-py3-none-any.whl", hash = "sha256:ca385708f83f01b3f27d9d675880d2458cb3b40ed1e25da688f551454ed0c112"}, - {file = "boto3-1.35.70.tar.gz", hash = "sha256:121dce8c7102eea6a6047d46bcd74e8a24dac793a4a3857de4f4bad9c12566fd"}, + {file = "boto3-1.35.99-py3-none-any.whl", hash = "sha256:83e560faaec38a956dfb3d62e05e1703ee50432b45b788c09e25107c5058bd71"}, + {file = "boto3-1.35.99.tar.gz", hash = "sha256:e0abd794a7a591d90558e92e29a9f8837d25ece8e3c120e530526fe27eba5fca"}, ] [package.dependencies] -botocore = ">=1.35.70,<1.36.0" +botocore = ">=1.35.99,<1.36.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -230,14 +225,14 @@ files = [ [[package]] name = "certifi" -version = "2024.12.14" +version = "2025.1.31" description = "Python package for providing Mozilla's CA Bundle." 
optional = false python-versions = ">=3.6" groups = ["main", "charm-libs", "integration"] files = [ - {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, - {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, + {file = "certifi-2025.1.31-py3-none-any.whl", hash = "sha256:ca78db4565a652026a4db2bcdf68f2fb589ea80d0be70e03929ed730746b84fe"}, + {file = "certifi-2025.1.31.tar.gz", hash = "sha256:3d5da6925056f6f18f119200434a4780a94263f10d1c21d032a6f6b2baa20651"}, ] [[package]] @@ -425,14 +420,14 @@ files = [ [[package]] name = "codespell" -version = "2.4.0" +version = "2.4.1" description = "Fix common misspellings in text files" optional = false python-versions = ">=3.8" groups = ["lint"] files = [ - {file = "codespell-2.4.0-py3-none-any.whl", hash = "sha256:b4c5b779f747dd481587aeecb5773301183f52b94b96ed51a28126d0482eec1d"}, - {file = "codespell-2.4.0.tar.gz", hash = "sha256:587d45b14707fb8ce51339ba4cce50ae0e98ce228ef61f3c5e160e34f681be58"}, + {file = "codespell-2.4.1-py3-none-any.whl", hash = "sha256:3dadafa67df7e4a3dbf51e0d7315061b80d265f9552ebd699b3dd6834b47e425"}, + {file = "codespell-2.4.1.tar.gz", hash = "sha256:299fcdcb09d23e81e35a671bbe746d5ad7e8385972e65dbb833a2eaac33c01e5"}, ] [package.extras] @@ -456,14 +451,14 @@ files = [ [[package]] name = "cosl" -version = "0.0.51" +version = "0.0.55" description = "Utils for COS Lite charms" optional = false python-versions = ">=3.8" groups = ["charm-libs"] files = [ - {file = "cosl-0.0.51-py3-none-any.whl", hash = "sha256:2ef43a94f0ca130fb4f2af924b75329f3c5e74b5c40ad4036af16713ad7d47d4"}, - {file = "cosl-0.0.51.tar.gz", hash = "sha256:32af380475bba32df7334d53ff16fb93466a169c7433e79a9fef8dbbecfdd43c"}, + {file = "cosl-0.0.55-py3-none-any.whl", hash = "sha256:bf641d611f982c8f494f3cf72ac4181b24e30c69504cfbd55aa8f54964797f90"}, + {file = "cosl-0.0.55.tar.gz", hash = "sha256:d3b8ee6f78302ac111d3a15d36c42a38c298a806161d762869513d348d778316"}, ] [package.dependencies] @@ -476,74 +471,75 @@ typing-extensions = "*" [[package]] name = "coverage" -version = "7.6.10" +version = "7.6.12" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" groups = ["unit"] files = [ - {file = "coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78"}, - {file = "coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c"}, - {file = "coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a"}, - {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165"}, - {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988"}, - {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5"}, - {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3"}, - {file = 
"coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5"}, - {file = "coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244"}, - {file = "coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e"}, - {file = "coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3"}, - {file = "coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43"}, - {file = "coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132"}, - {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f"}, - {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994"}, - {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99"}, - {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd"}, - {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377"}, - {file = "coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8"}, - {file = "coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609"}, - {file = "coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853"}, - {file = "coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078"}, - {file = "coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0"}, - {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50"}, - {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022"}, - {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b"}, - {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0"}, - {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852"}, - {file = "coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359"}, - {file = "coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = 
"sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247"}, - {file = "coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9"}, - {file = "coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b"}, - {file = "coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690"}, - {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18"}, - {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c"}, - {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd"}, - {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e"}, - {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694"}, - {file = "coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6"}, - {file = "coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e"}, - {file = "coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe"}, - {file = "coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273"}, - {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8"}, - {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098"}, - {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb"}, - {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0"}, - {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf"}, - {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2"}, - {file = "coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312"}, - {file = "coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d"}, - {file = "coverage-7.6.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a"}, - {file = "coverage-7.6.10-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27"}, - {file = "coverage-7.6.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4"}, - {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f"}, - {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25"}, - {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315"}, - {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90"}, - {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d"}, - {file = "coverage-7.6.10-cp39-cp39-win32.whl", hash = "sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18"}, - {file = "coverage-7.6.10-cp39-cp39-win_amd64.whl", hash = "sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59"}, - {file = "coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f"}, - {file = "coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:704c8c8c6ce6569286ae9622e534b4f5b9759b6f2cd643f1c1a61f666d534fe8"}, + {file = "coverage-7.6.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ad7525bf0241e5502168ae9c643a2f6c219fa0a283001cee4cf23a9b7da75879"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06097c7abfa611c91edb9e6920264e5be1d6ceb374efb4986f38b09eed4cb2fe"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220fa6c0ad7d9caef57f2c8771918324563ef0d8272c94974717c3909664e674"}, + {file = "coverage-7.6.12-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3688b99604a24492bcfe1c106278c45586eb819bf66a654d8a9a1433022fb2eb"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d1a987778b9c71da2fc8948e6f2656da6ef68f59298b7e9786849634c35d2c3c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:cec6b9ce3bd2b7853d4a4563801292bfee40b030c05a3d29555fd2a8ee9bd68c"}, + {file = "coverage-7.6.12-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ace9048de91293e467b44bce0f0381345078389814ff6e18dbac8fdbf896360e"}, + {file = "coverage-7.6.12-cp310-cp310-win32.whl", hash = "sha256:ea31689f05043d520113e0552f039603c4dd71fa4c287b64cb3606140c66f425"}, + {file = "coverage-7.6.12-cp310-cp310-win_amd64.whl", hash = "sha256:676f92141e3c5492d2a1596d52287d0d963df21bf5e55c8b03075a60e1ddf8aa"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e18aafdfb3e9ec0d261c942d35bd7c28d031c5855dadb491d2723ba54f4c3015"}, + {file = "coverage-7.6.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66fe626fd7aa5982cdebad23e49e78ef7dbb3e3c2a5960a2b53632f1f703ea45"}, + {file = 
"coverage-7.6.12-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ef01d70198431719af0b1f5dcbefc557d44a190e749004042927b2a3fed0702"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e92ae5a289a4bc4c0aae710c0948d3c7892e20fd3588224ebe242039573bf0"}, + {file = "coverage-7.6.12-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e695df2c58ce526eeab11a2e915448d3eb76f75dffe338ea613c1201b33bab2f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d74c08e9aaef995f8c4ef6d202dbd219c318450fe2a76da624f2ebb9c8ec5d9f"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e995b3b76ccedc27fe4f477b349b7d64597e53a43fc2961db9d3fbace085d69d"}, + {file = "coverage-7.6.12-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b1f097878d74fe51e1ddd1be62d8e3682748875b461232cf4b52ddc6e6db0bba"}, + {file = "coverage-7.6.12-cp311-cp311-win32.whl", hash = "sha256:1f7ffa05da41754e20512202c866d0ebfc440bba3b0ed15133070e20bf5aeb5f"}, + {file = "coverage-7.6.12-cp311-cp311-win_amd64.whl", hash = "sha256:e216c5c45f89ef8971373fd1c5d8d1164b81f7f5f06bbf23c37e7908d19e8558"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b172f8e030e8ef247b3104902cc671e20df80163b60a203653150d2fc204d1ad"}, + {file = "coverage-7.6.12-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:641dfe0ab73deb7069fb972d4d9725bf11c239c309ce694dd50b1473c0f641c3"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e549f54ac5f301e8e04c569dfdb907f7be71b06b88b5063ce9d6953d2d58574"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:959244a17184515f8c52dcb65fb662808767c0bd233c1d8a166e7cf74c9ea985"}, + {file = "coverage-7.6.12-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bda1c5f347550c359f841d6614fb8ca42ae5cb0b74d39f8a1e204815ebe25750"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1ceeb90c3eda1f2d8c4c578c14167dbd8c674ecd7d38e45647543f19839dd6ea"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f16f44025c06792e0fb09571ae454bcc7a3ec75eeb3c36b025eccf501b1a4c3"}, + {file = "coverage-7.6.12-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b076e625396e787448d27a411aefff867db2bffac8ed04e8f7056b07024eed5a"}, + {file = "coverage-7.6.12-cp312-cp312-win32.whl", hash = "sha256:00b2086892cf06c7c2d74983c9595dc511acca00665480b3ddff749ec4fb2a95"}, + {file = "coverage-7.6.12-cp312-cp312-win_amd64.whl", hash = "sha256:7ae6eabf519bc7871ce117fb18bf14e0e343eeb96c377667e3e5dd12095e0288"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:488c27b3db0ebee97a830e6b5a3ea930c4a6e2c07f27a5e67e1b3532e76b9ef1"}, + {file = "coverage-7.6.12-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d1095bbee1851269f79fd8e0c9b5544e4c00c0c24965e66d8cba2eb5bb535fd"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0533adc29adf6a69c1baa88c3d7dbcaadcffa21afbed3ca7a225a440e4744bf9"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:53c56358d470fa507a2b6e67a68fd002364d23c83741dbc4c2e0680d80ca227e"}, + {file = "coverage-7.6.12-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64cbb1a3027c79ca6310bf101014614f6e6e18c226474606cf725238cf5bc2d4"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:79cac3390bfa9836bb795be377395f28410811c9066bc4eefd8015258a7578c6"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9b148068e881faa26d878ff63e79650e208e95cf1c22bd3f77c3ca7b1d9821a3"}, + {file = "coverage-7.6.12-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8bec2ac5da793c2685ce5319ca9bcf4eee683b8a1679051f8e6ec04c4f2fd7dc"}, + {file = "coverage-7.6.12-cp313-cp313-win32.whl", hash = "sha256:200e10beb6ddd7c3ded322a4186313d5ca9e63e33d8fab4faa67ef46d3460af3"}, + {file = "coverage-7.6.12-cp313-cp313-win_amd64.whl", hash = "sha256:2b996819ced9f7dbb812c701485d58f261bef08f9b85304d41219b1496b591ef"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:299cf973a7abff87a30609879c10df0b3bfc33d021e1adabc29138a48888841e"}, + {file = "coverage-7.6.12-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:4b467a8c56974bf06e543e69ad803c6865249d7a5ccf6980457ed2bc50312703"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2458f275944db8129f95d91aee32c828a408481ecde3b30af31d552c2ce284a0"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a9d8be07fb0832636a0f72b80d2a652fe665e80e720301fb22b191c3434d924"}, + {file = "coverage-7.6.12-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14d47376a4f445e9743f6c83291e60adb1b127607a3618e3185bbc8091f0467b"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b95574d06aa9d2bd6e5cc35a5bbe35696342c96760b69dc4287dbd5abd4ad51d"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ecea0c38c9079570163d663c0433a9af4094a60aafdca491c6a3d248c7432827"}, + {file = "coverage-7.6.12-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2251fabcfee0a55a8578a9d29cecfee5f2de02f11530e7d5c5a05859aa85aee9"}, + {file = "coverage-7.6.12-cp313-cp313t-win32.whl", hash = "sha256:eb5507795caabd9b2ae3f1adc95f67b1104971c22c624bb354232d65c4fc90b3"}, + {file = "coverage-7.6.12-cp313-cp313t-win_amd64.whl", hash = "sha256:f60a297c3987c6c02ffb29effc70eadcbb412fe76947d394a1091a3615948e2f"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e7575ab65ca8399c8c4f9a7d61bbd2d204c8b8e447aab9d355682205c9dd948d"}, + {file = "coverage-7.6.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8161d9fbc7e9fe2326de89cd0abb9f3599bccc1287db0aba285cb68d204ce929"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a1e465f398c713f1b212400b4e79a09829cd42aebd360362cd89c5bdc44eb87"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f25d8b92a4e31ff1bd873654ec367ae811b3a943583e05432ea29264782dc32c"}, + {file = "coverage-7.6.12-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a936309a65cc5ca80fa9f20a442ff9e2d06927ec9a4f54bcba9c14c066323f2"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:aa6f302a3a0b5f240ee201297fff0bbfe2fa0d415a94aeb257d8b461032389bd"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:f973643ef532d4f9be71dd88cf7588936685fdb576d93a79fe9f65bc337d9d73"}, + {file = "coverage-7.6.12-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:78f5243bb6b1060aed6213d5107744c19f9571ec76d54c99cc15938eb69e0e86"}, + {file = "coverage-7.6.12-cp39-cp39-win32.whl", hash = "sha256:69e62c5034291c845fc4df7f8155e8544178b6c774f97a99e2734b05eb5bed31"}, + {file = "coverage-7.6.12-cp39-cp39-win_amd64.whl", hash = "sha256:b01a840ecc25dce235ae4c1b6a0daefb2a203dba0e6e980637ee9c2f6ee0df57"}, + {file = "coverage-7.6.12-pp39.pp310-none-any.whl", hash = "sha256:7e39e845c4d764208e7b8f6a21c541ade741e2c41afabdfa1caa28687a3c98cf"}, + {file = "coverage-7.6.12-py3-none-any.whl", hash = "sha256:eb8668cfbc279a536c633137deeb9435d2962caec279c3f8cf8b91fff6ff8953"}, + {file = "coverage-7.6.12.tar.gz", hash = "sha256:48cfc4641d95d34766ad41d9573cc0f22a48aa88d22657a1fe01dca0dbae4de2"}, ] [package.dependencies] @@ -554,39 +550,43 @@ toml = ["tomli"] [[package]] name = "cryptography" -version = "44.0.0" +version = "44.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = false python-versions = "!=3.9.0,!=3.9.1,>=3.7" groups = ["charm-libs", "integration"] files = [ - {file = "cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123"}, - {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092"}, - {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f"}, - {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb"}, - {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b"}, - {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543"}, - {file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e"}, - {file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e"}, - {file = "cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053"}, - {file = "cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd"}, - {file = "cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591"}, - {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7"}, - {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc"}, - {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = 
"sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289"}, - {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7"}, - {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c"}, - {file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64"}, - {file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285"}, - {file = "cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417"}, - {file = "cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede"}, - {file = "cryptography-44.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:37d76e6863da3774cd9db5b409a9ecfd2c71c981c38788d3fcfaf177f447b731"}, - {file = "cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:f677e1268c4e23420c3acade68fac427fffcb8d19d7df95ed7ad17cdef8404f4"}, - {file = "cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f5e7cb1e5e56ca0933b4873c0220a78b773b24d40d186b6738080b73d3d0a756"}, - {file = "cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:8b3e6eae66cf54701ee7d9c83c30ac0a1e3fa17be486033000f2a73a12ab507c"}, - {file = "cryptography-44.0.0-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:be4ce505894d15d5c5037167ffb7f0ae90b7be6f2a98f9a5c3442395501c32fa"}, - {file = "cryptography-44.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:62901fb618f74d7d81bf408c8719e9ec14d863086efe4185afd07c352aee1d2c"}, - {file = "cryptography-44.0.0.tar.gz", hash = "sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02"}, + {file = "cryptography-44.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf688f615c29bfe9dfc44312ca470989279f0e94bb9f631f85e3459af8efc009"}, + {file = "cryptography-44.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd7c7e2d71d908dc0f8d2027e1604102140d84b155e658c20e8ad1304317691f"}, + {file = "cryptography-44.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:887143b9ff6bad2b7570da75a7fe8bbf5f65276365ac259a5d2d5147a73775f2"}, + {file = "cryptography-44.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:322eb03ecc62784536bc173f1483e76747aafeb69c8728df48537eb431cd1911"}, + {file = "cryptography-44.0.1-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:21377472ca4ada2906bc313168c9dc7b1d7ca417b63c1c3011d0c74b7de9ae69"}, + {file = "cryptography-44.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:df978682c1504fc93b3209de21aeabf2375cb1571d4e61907b3e7a2540e83026"}, + {file = "cryptography-44.0.1-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:eb3889330f2a4a148abead555399ec9a32b13b7c8ba969b72d8e500eb7ef84cd"}, + {file = "cryptography-44.0.1-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:8e6a85a93d0642bd774460a86513c5d9d80b5c002ca9693e63f6e540f1815ed0"}, + {file = "cryptography-44.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:6f76fdd6fd048576a04c5210d53aa04ca34d2ed63336d4abd306d0cbe298fddf"}, + {file = "cryptography-44.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = 
"sha256:6c8acf6f3d1f47acb2248ec3ea261171a671f3d9428e34ad0357148d492c7864"}, + {file = "cryptography-44.0.1-cp37-abi3-win32.whl", hash = "sha256:24979e9f2040c953a94bf3c6782e67795a4c260734e5264dceea65c8f4bae64a"}, + {file = "cryptography-44.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:fd0ee90072861e276b0ff08bd627abec29e32a53b2be44e41dbcdf87cbee2b00"}, + {file = "cryptography-44.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:a2d8a7045e1ab9b9f803f0d9531ead85f90c5f2859e653b61497228b18452008"}, + {file = "cryptography-44.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8272f257cf1cbd3f2e120f14c68bff2b6bdfcc157fafdee84a1b795efd72862"}, + {file = "cryptography-44.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e8d181e90a777b63f3f0caa836844a1182f1f265687fac2115fcf245f5fbec3"}, + {file = "cryptography-44.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:436df4f203482f41aad60ed1813811ac4ab102765ecae7a2bbb1dbb66dcff5a7"}, + {file = "cryptography-44.0.1-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4f422e8c6a28cf8b7f883eb790695d6d45b0c385a2583073f3cec434cc705e1a"}, + {file = "cryptography-44.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:72198e2b5925155497a5a3e8c216c7fb3e64c16ccee11f0e7da272fa93b35c4c"}, + {file = "cryptography-44.0.1-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:2a46a89ad3e6176223b632056f321bc7de36b9f9b93b2cc1cccf935a3849dc62"}, + {file = "cryptography-44.0.1-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:53f23339864b617a3dfc2b0ac8d5c432625c80014c25caac9082314e9de56f41"}, + {file = "cryptography-44.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:888fcc3fce0c888785a4876ca55f9f43787f4c5c1cc1e2e0da71ad481ff82c5b"}, + {file = "cryptography-44.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:00918d859aa4e57db8299607086f793fa7813ae2ff5a4637e318a25ef82730f7"}, + {file = "cryptography-44.0.1-cp39-abi3-win32.whl", hash = "sha256:9b336599e2cb77b1008cb2ac264b290803ec5e8e89d618a5e978ff5eb6f715d9"}, + {file = "cryptography-44.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:e403f7f766ded778ecdb790da786b418a9f2394f36e8cc8b796cc056ab05f44f"}, + {file = "cryptography-44.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1f9a92144fa0c877117e9748c74501bea842f93d21ee00b0cf922846d9d0b183"}, + {file = "cryptography-44.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:610a83540765a8d8ce0f351ce42e26e53e1f774a6efb71eb1b41eb01d01c3d12"}, + {file = "cryptography-44.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5fed5cd6102bb4eb843e3315d2bf25fede494509bddadb81e03a859c1bc17b83"}, + {file = "cryptography-44.0.1-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:f4daefc971c2d1f82f03097dc6f216744a6cd2ac0f04c68fb935ea2ba2a0d420"}, + {file = "cryptography-44.0.1-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94f99f2b943b354a5b6307d7e8d19f5c423a794462bde2bf310c770ba052b1c4"}, + {file = "cryptography-44.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d9c5b9f698a83c8bd71e0f4d3f9f839ef244798e5ffe96febfa9714717db7af7"}, + {file = "cryptography-44.0.1.tar.gz", hash = "sha256:f51f5705ab27898afda1aaa430f34ad90dc117421057782022edf0600bec5f14"}, ] [package.dependencies] @@ -599,7 +599,7 @@ nox = ["nox (>=2024.4.15)", "nox[uv] (>=2024.3.2)"] pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.4)", "ruff (>=0.3.6)"] sdist = ["build (>=1.0.0)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi (>=2024)", 
"cryptography-vectors (==44.0.0)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test = ["certifi (>=2024)", "cryptography-vectors (==44.0.1)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] test-randomorder = ["pytest-randomly"] [[package]] @@ -616,21 +616,21 @@ files = [ [[package]] name = "deprecated" -version = "1.2.15" +version = "1.2.18" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" groups = ["charm-libs"] files = [ - {file = "Deprecated-1.2.15-py2.py3-none-any.whl", hash = "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320"}, - {file = "deprecated-1.2.15.tar.gz", hash = "sha256:683e561a90de76239796e6b6feac66b99030d2dd3fcf61ef996330f14bbb9b0d"}, + {file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"}, + {file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"}, ] [package.dependencies] wrapt = ">=1.10,<2" [package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "jinja2 (>=3.0.3,<3.1.0)", "setuptools", "sphinx (<2)", "tox"] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"] [[package]] name = "exceptiongroup" @@ -690,14 +690,14 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] [[package]] name = "googleapis-common-protos" -version = "1.66.0" +version = "1.67.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" groups = ["charm-libs"] files = [ - {file = "googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed"}, - {file = "googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c"}, + {file = "googleapis_common_protos-1.67.0-py2.py3-none-any.whl", hash = "sha256:579de760800d13616f51cf8be00c876f00a9f146d3e6510e19d1f4111758b741"}, + {file = "googleapis_common_protos-1.67.0.tar.gz", hash = "sha256:21398025365f138be356d5923e9168737d94d46a72aefee4a6110a1f23463c86"}, ] [package.dependencies] @@ -849,14 +849,14 @@ tomli = {version = "*", markers = "python_version > \"3.6\" and python_version < [[package]] name = "ipython" -version = "8.31.0" +version = "8.32.0" description = "IPython: Productive Interactive Computing" optional = false python-versions = ">=3.10" groups = ["integration"] files = [ - {file = "ipython-8.31.0-py3-none-any.whl", hash = "sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6"}, - {file = "ipython-8.31.0.tar.gz", hash = "sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b"}, + {file = "ipython-8.32.0-py3-none-any.whl", hash = "sha256:cae85b0c61eff1fc48b0a8002de5958b6528fa9c8defb1894da63f42613708aa"}, + {file = "ipython-8.32.0.tar.gz", hash = "sha256:be2c91895b0b9ea7ba49d33b23e2040c352b33eb6a519cca7ce6e0c743444251"}, ] [package.dependencies] @@ -908,14 +908,14 @@ testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] [[package]] name = "jinja2" -version = "3.1.5" +version = "3.1.6" description = "A very fast and expressive template engine." 
optional = false python-versions = ">=3.7" groups = ["main", "integration"] files = [ - {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, - {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, ] [package.dependencies] @@ -1301,14 +1301,14 @@ files = [ [[package]] name = "ops" -version = "2.17.1" +version = "2.18.1" description = "The Python library behind great charms" optional = false python-versions = ">=3.8" groups = ["main", "charm-libs"] files = [ - {file = "ops-2.17.1-py3-none-any.whl", hash = "sha256:0fabc45740d59619c3265328f51f71f99b06557e22493cdd32d10c2b25bcd553"}, - {file = "ops-2.17.1.tar.gz", hash = "sha256:de2d1dd382b4a5f3df3ba78a5266d59462644f3f8ea0f4e7479a248998862a3f"}, + {file = "ops-2.18.1-py3-none-any.whl", hash = "sha256:ba0312366e25b3ae90cf4b8d0af6ea6b612d4951500f856bce609cdb25c9bdeb"}, + {file = "ops-2.18.1.tar.gz", hash = "sha256:5619deb370c00ea851f9579b780a09b88b1a1d020e58e1ed81d31c8fb7b28c8a"}, ] [package.dependencies] @@ -1316,7 +1316,7 @@ PyYAML = "==6.*" websocket-client = "==1.*" [package.extras] -docs = ["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", "ops-scenario (>=7.0.5,<8)", "pyspelling", "sphinx (>=8.0.0,<8.1.0)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-design", "sphinx-notfound-page", "sphinx-tabs", "sphinxcontrib-jquery", "sphinxext-opengraph"] +docs = ["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", "pyspelling", "sphinx (>=8.0.0,<8.1.0)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-design", "sphinx-notfound-page", "sphinx-tabs", "sphinxcontrib-jquery", "sphinxext-opengraph"] testing = ["ops-scenario (>=7.0.5,<8)"] [[package]] @@ -1333,14 +1333,14 @@ files = [ [[package]] name = "paramiko" -version = "3.5.0" +version = "3.5.1" description = "SSH2 protocol library" optional = false python-versions = ">=3.6" groups = ["integration"] files = [ - {file = "paramiko-3.5.0-py3-none-any.whl", hash = "sha256:1fedf06b085359051cd7d0d270cebe19e755a8a921cc2ddbfa647fb0cd7d68f9"}, - {file = "paramiko-3.5.0.tar.gz", hash = "sha256:ad11e540da4f55cedda52931f1a3f812a8238a7af7f62a60de538cd80bb28124"}, + {file = "paramiko-3.5.1-py3-none-any.whl", hash = "sha256:43b9a0501fc2b5e70680388d9346cf252cfb7d00b0667c39e80eb43a408b8f61"}, + {file = "paramiko-3.5.1.tar.gz", hash = "sha256:b2c665bc45b2b215bd7d7f039901b14b067da00f3a11e6640995fd58f2664822"}, ] [package.dependencies] @@ -1415,14 +1415,14 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "poetry-core" -version = "2.0.1" +version = "2.1.1" description = "Poetry PEP 517 Build Backend" optional = false python-versions = "<4.0,>=3.9" groups = ["charm-libs"] files = [ - {file = "poetry_core-2.0.1-py3-none-any.whl", hash = "sha256:a3c7009536522cda4eb0fb3805c9dc935b5537f8727dd01efb9c15e51a17552b"}, - {file = "poetry_core-2.0.1.tar.gz", hash = "sha256:10177c2772469d9032a49f0d8707af761b1c597cea3b4fb31546e5cd436eb157"}, + {file = "poetry_core-2.1.1-py3-none-any.whl", hash = "sha256:bc3b0382ab4d00d5d780277fd0aad1580eb4403613b37fc60fec407b5bee1fe6"}, + {file = "poetry_core-2.1.1.tar.gz", hash = "sha256:c1a1f6f00e4254742f40988a8caf665549101cf9991122cd5de1198897768b1a"}, ] [[package]] @@ 
-1442,23 +1442,23 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "4.25.5" +version = "4.25.6" description = "" optional = false python-versions = ">=3.8" groups = ["charm-libs", "integration"] files = [ - {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, - {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, - {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, - {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, - {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, - {file = "protobuf-4.25.5-cp38-cp38-win32.whl", hash = "sha256:98d8d8aa50de6a2747efd9cceba361c9034050ecce3e09136f90de37ddba66e1"}, - {file = "protobuf-4.25.5-cp38-cp38-win_amd64.whl", hash = "sha256:b0234dd5a03049e4ddd94b93400b67803c823cfc405689688f59b34e0742381a"}, - {file = "protobuf-4.25.5-cp39-cp39-win32.whl", hash = "sha256:abe32aad8561aa7cc94fc7ba4fdef646e576983edb94a73381b03c53728a626f"}, - {file = "protobuf-4.25.5-cp39-cp39-win_amd64.whl", hash = "sha256:7a183f592dc80aa7c8da7ad9e55091c4ffc9497b3054452d629bb85fa27c2a45"}, - {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, - {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, + {file = "protobuf-4.25.6-cp310-abi3-win32.whl", hash = "sha256:61df6b5786e2b49fc0055f636c1e8f0aff263808bb724b95b164685ac1bcc13a"}, + {file = "protobuf-4.25.6-cp310-abi3-win_amd64.whl", hash = "sha256:b8f837bfb77513fe0e2f263250f423217a173b6d85135be4d81e96a4653bcd3c"}, + {file = "protobuf-4.25.6-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:6d4381f2417606d7e01750e2729fe6fbcda3f9883aa0c32b51d23012bded6c91"}, + {file = "protobuf-4.25.6-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:5dd800da412ba7f6f26d2c08868a5023ce624e1fdb28bccca2dc957191e81fb5"}, + {file = "protobuf-4.25.6-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:4434ff8bb5576f9e0c78f47c41cdf3a152c0b44de475784cd3fd170aef16205a"}, + {file = "protobuf-4.25.6-cp38-cp38-win32.whl", hash = "sha256:8bad0f9e8f83c1fbfcc34e573352b17dfce7d0519512df8519994168dc015d7d"}, + {file = "protobuf-4.25.6-cp38-cp38-win_amd64.whl", hash = "sha256:b6905b68cde3b8243a198268bb46fbec42b3455c88b6b02fb2529d2c306d18fc"}, + {file = "protobuf-4.25.6-cp39-cp39-win32.whl", hash = "sha256:3f3b0b39db04b509859361ac9bca65a265fe9342e6b9406eda58029f5b1d10b2"}, + {file = "protobuf-4.25.6-cp39-cp39-win_amd64.whl", hash = "sha256:6ef2045f89d4ad8d95fd43cd84621487832a61d15b49500e4c1350e8a0ef96be"}, + {file = "protobuf-4.25.6-py3-none-any.whl", hash = "sha256:07972021c8e30b870cfc0863409d033af940213e0e7f64e27fe017b929d2c9f7"}, + {file = "protobuf-4.25.6.tar.gz", hash = "sha256:f8cfbae7c5afd0d0eaccbe73267339bff605a2315860bb1ba08eb66670a9a91f"}, ] [[package]] @@ -1809,33 +1809,16 @@ pytest = ">=7.0.0" docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"] -[[package]] -name = "pytest-github-secrets" -version = "0.1.0" -description = "" -optional = false -python-versions 
= ">=3.8" -groups = ["integration"] -files = [] -develop = false - -[package.source] -type = "git" -url = "https://github.com/canonical/data-platform-workflows" -reference = "v29.0.5" -resolved_reference = "0e591badc29bcde5039e6bd58da26f34ef065c0f" -subdirectory = "python/pytest_plugins/github_secrets" - [[package]] name = "pytest-operator" -version = "0.39.0" +version = "0.40.0" description = "Fixtures for Operators" optional = false python-versions = "*" groups = ["integration"] files = [ - {file = "pytest_operator-0.39.0-py3-none-any.whl", hash = "sha256:ade76e1896eaf7f71704b537fd6661a705d81a045b8db71531d9e4741913fa19"}, - {file = "pytest_operator-0.39.0.tar.gz", hash = "sha256:b66bd8c6d161593c258a5714118a51e9f37721e7cd9e503299423d8a7d900f90"}, + {file = "pytest_operator-0.40.0-py3-none-any.whl", hash = "sha256:1cfa93ab61b11e8d7bf58dbb1a39e75fcbfcc084781bb571fde08fda7e236713"}, + {file = "pytest_operator-0.40.0.tar.gz", hash = "sha256:45394ade32b7765b6ba89871b676d1fb8aa7578589f74df26ff0fca4692d1c7b"}, ] [package.dependencies] @@ -1846,46 +1829,6 @@ pytest = "*" pytest-asyncio = "<0.23" pyyaml = "*" -[[package]] -name = "pytest-operator-cache" -version = "0.1.0" -description = "" -optional = false -python-versions = ">=3.8" -groups = ["integration"] -files = [] -develop = false - -[package.dependencies] -pyyaml = "*" - -[package.source] -type = "git" -url = "https://github.com/canonical/data-platform-workflows" -reference = "v29.0.5" -resolved_reference = "0e591badc29bcde5039e6bd58da26f34ef065c0f" -subdirectory = "python/pytest_plugins/pytest_operator_cache" - -[[package]] -name = "pytest-operator-groups" -version = "0.1.0" -description = "" -optional = false -python-versions = ">=3.8" -groups = ["integration"] -files = [] -develop = false - -[package.dependencies] -pytest = "*" - -[package.source] -type = "git" -url = "https://github.com/canonical/data-platform-workflows" -reference = "v29.0.5" -resolved_reference = "0e591badc29bcde5039e6bd58da26f34ef065c0f" -subdirectory = "python/pytest_plugins/pytest_operator_groups" - [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1903,14 +1846,14 @@ six = ">=1.5" [[package]] name = "pytz" -version = "2024.2" +version = "2025.1" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" groups = ["integration"] files = [ - {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, - {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, + {file = "pytz-2025.1-py2.py3-none-any.whl", hash = "sha256:89dd22dca55b46eac6eda23b2d72721bf1bdfef212645d81513ef5d03038de57"}, + {file = "pytz-2025.1.tar.gz", hash = "sha256:c2db42be2a2518b28e65f9207c4d05e6ff547d1efa4086469ef855e4ab70178e"}, ] [[package]] @@ -1978,14 +1921,14 @@ files = [ [[package]] name = "referencing" -version = "0.36.1" +version = "0.36.2" description = "JSON Referencing + Python" optional = false python-versions = ">=3.9" groups = ["charm-libs"] files = [ - {file = "referencing-0.36.1-py3-none-any.whl", hash = "sha256:363d9c65f080d0d70bc41c721dce3c7f3e77fc09f269cd5c8813da18069a6794"}, - {file = "referencing-0.36.1.tar.gz", hash = "sha256:ca2e6492769e3602957e9b831b94211599d2aade9477f5d44110d2530cf9aade"}, + {file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"}, + {file = "referencing-0.36.2.tar.gz", hash = 
"sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"}, ] [package.dependencies] @@ -2164,30 +2107,30 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.9.2" +version = "0.9.6" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" groups = ["format"] files = [ - {file = "ruff-0.9.2-py3-none-linux_armv6l.whl", hash = "sha256:80605a039ba1454d002b32139e4970becf84b5fee3a3c3bf1c2af6f61a784347"}, - {file = "ruff-0.9.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b9aab82bb20afd5f596527045c01e6ae25a718ff1784cb92947bff1f83068b00"}, - {file = "ruff-0.9.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fbd337bac1cfa96be615f6efcd4bc4d077edbc127ef30e2b8ba2a27e18c054d4"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b35259b0cbf8daa22a498018e300b9bb0174c2bbb7bcba593935158a78054d"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8b6a9701d1e371bf41dca22015c3f89769da7576884d2add7317ec1ec8cb9c3c"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9cc53e68b3c5ae41e8faf83a3b89f4a5d7b2cb666dff4b366bb86ed2a85b481f"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8efd9da7a1ee314b910da155ca7e8953094a7c10d0c0a39bfde3fcfd2a015684"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3292c5a22ea9a5f9a185e2d131dc7f98f8534a32fb6d2ee7b9944569239c648d"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a605fdcf6e8b2d39f9436d343d1f0ff70c365a1e681546de0104bef81ce88df"}, - {file = "ruff-0.9.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c547f7f256aa366834829a08375c297fa63386cbe5f1459efaf174086b564247"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d18bba3d3353ed916e882521bc3e0af403949dbada344c20c16ea78f47af965e"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:b338edc4610142355ccf6b87bd356729b62bf1bc152a2fad5b0c7dc04af77bfe"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:492a5e44ad9b22a0ea98cf72e40305cbdaf27fac0d927f8bc9e1df316dcc96eb"}, - {file = "ruff-0.9.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:af1e9e9fe7b1f767264d26b1075ac4ad831c7db976911fa362d09b2d0356426a"}, - {file = "ruff-0.9.2-py3-none-win32.whl", hash = "sha256:71cbe22e178c5da20e1514e1e01029c73dc09288a8028a5d3446e6bba87a5145"}, - {file = "ruff-0.9.2-py3-none-win_amd64.whl", hash = "sha256:c5e1d6abc798419cf46eed03f54f2e0c3adb1ad4b801119dedf23fcaf69b55b5"}, - {file = "ruff-0.9.2-py3-none-win_arm64.whl", hash = "sha256:a1b63fa24149918f8b37cef2ee6fff81f24f0d74b6f0bdc37bc3e1f2143e41c6"}, - {file = "ruff-0.9.2.tar.gz", hash = "sha256:b5eceb334d55fae5f316f783437392642ae18e16dcf4f1858d55d3c2a0f8f5d0"}, + {file = "ruff-0.9.6-py3-none-linux_armv6l.whl", hash = "sha256:2f218f356dd2d995839f1941322ff021c72a492c470f0b26a34f844c29cdf5ba"}, + {file = "ruff-0.9.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:b908ff4df65dad7b251c9968a2e4560836d8f5487c2f0cc238321ed951ea0504"}, + {file = "ruff-0.9.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:b109c0ad2ececf42e75fa99dc4043ff72a357436bb171900714a9ea581ddef83"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1de4367cca3dac99bcbd15c161404e849bb0bfd543664db39232648dc00112dc"}, + 
{file = "ruff-0.9.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac3ee4d7c2c92ddfdaedf0bf31b2b176fa7aa8950efc454628d477394d35638b"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5dc1edd1775270e6aa2386119aea692039781429f0be1e0949ea5884e011aa8e"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4a091729086dffa4bd070aa5dab7e39cc6b9d62eb2bef8f3d91172d30d599666"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1bbc6808bf7b15796cef0815e1dfb796fbd383e7dbd4334709642649625e7c5"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:589d1d9f25b5754ff230dce914a174a7c951a85a4e9270613a2b74231fdac2f5"}, + {file = "ruff-0.9.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc61dd5131742e21103fbbdcad683a8813be0e3c204472d520d9a5021ca8b217"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:5e2d9126161d0357e5c8f30b0bd6168d2c3872372f14481136d13de9937f79b6"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:68660eab1a8e65babb5229a1f97b46e3120923757a68b5413d8561f8a85d4897"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c4cae6c4cc7b9b4017c71114115db0445b00a16de3bcde0946273e8392856f08"}, + {file = "ruff-0.9.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19f505b643228b417c1111a2a536424ddde0db4ef9023b9e04a46ed8a1cb4656"}, + {file = "ruff-0.9.6-py3-none-win32.whl", hash = "sha256:194d8402bceef1b31164909540a597e0d913c0e4952015a5b40e28c146121b5d"}, + {file = "ruff-0.9.6-py3-none-win_amd64.whl", hash = "sha256:03482d5c09d90d4ee3f40d97578423698ad895c87314c4de39ed2af945633caa"}, + {file = "ruff-0.9.6-py3-none-win_arm64.whl", hash = "sha256:0e2bb706a2be7ddfea4a4af918562fdc1bcb16df255e5fa595bbd800ce322a5a"}, + {file = "ruff-0.9.6.tar.gz", hash = "sha256:81761592f72b620ec8fa1068a6fd00e98a5ebee342a3642efd84454f3031dca9"}, ] [[package]] @@ -2416,81 +2359,81 @@ test = ["websockets"] [[package]] name = "websockets" -version = "14.2" +version = "15.0" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = false python-versions = ">=3.9" groups = ["integration"] files = [ - {file = "websockets-14.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e8179f95323b9ab1c11723e5d91a89403903f7b001828161b480a7810b334885"}, - {file = "websockets-14.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0d8c3e2cdb38f31d8bd7d9d28908005f6fa9def3324edb9bf336d7e4266fd397"}, - {file = "websockets-14.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:714a9b682deb4339d39ffa674f7b674230227d981a37d5d174a4a83e3978a610"}, - {file = "websockets-14.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2e53c72052f2596fb792a7acd9704cbc549bf70fcde8a99e899311455974ca3"}, - {file = "websockets-14.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3fbd68850c837e57373d95c8fe352203a512b6e49eaae4c2f4088ef8cf21980"}, - {file = "websockets-14.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b27ece32f63150c268593d5fdb82819584831a83a3f5809b7521df0685cd5d8"}, - {file = "websockets-14.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4daa0faea5424d8713142b33825fff03c736f781690d90652d2c8b053345b0e7"}, - {file = 
"websockets-14.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:bc63cee8596a6ec84d9753fd0fcfa0452ee12f317afe4beae6b157f0070c6c7f"}, - {file = "websockets-14.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a570862c325af2111343cc9b0257b7119b904823c675b22d4ac547163088d0d"}, - {file = "websockets-14.2-cp310-cp310-win32.whl", hash = "sha256:75862126b3d2d505e895893e3deac0a9339ce750bd27b4ba515f008b5acf832d"}, - {file = "websockets-14.2-cp310-cp310-win_amd64.whl", hash = "sha256:cc45afb9c9b2dc0852d5c8b5321759cf825f82a31bfaf506b65bf4668c96f8b2"}, - {file = "websockets-14.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3bdc8c692c866ce5fefcaf07d2b55c91d6922ac397e031ef9b774e5b9ea42166"}, - {file = "websockets-14.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c93215fac5dadc63e51bcc6dceca72e72267c11def401d6668622b47675b097f"}, - {file = "websockets-14.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1c9b6535c0e2cf8a6bf938064fb754aaceb1e6a4a51a80d884cd5db569886910"}, - {file = "websockets-14.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a52a6d7cf6938e04e9dceb949d35fbdf58ac14deea26e685ab6368e73744e4c"}, - {file = "websockets-14.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9f05702e93203a6ff5226e21d9b40c037761b2cfb637187c9802c10f58e40473"}, - {file = "websockets-14.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22441c81a6748a53bfcb98951d58d1af0661ab47a536af08920d129b4d1c3473"}, - {file = "websockets-14.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd9b868d78b194790e6236d9cbc46d68aba4b75b22497eb4ab64fa640c3af56"}, - {file = "websockets-14.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1a5a20d5843886d34ff8c57424cc65a1deda4375729cbca4cb6b3353f3ce4142"}, - {file = "websockets-14.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:34277a29f5303d54ec6468fb525d99c99938607bc96b8d72d675dee2b9f5bf1d"}, - {file = "websockets-14.2-cp311-cp311-win32.whl", hash = "sha256:02687db35dbc7d25fd541a602b5f8e451a238ffa033030b172ff86a93cb5dc2a"}, - {file = "websockets-14.2-cp311-cp311-win_amd64.whl", hash = "sha256:862e9967b46c07d4dcd2532e9e8e3c2825e004ffbf91a5ef9dde519ee2effb0b"}, - {file = "websockets-14.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1f20522e624d7ffbdbe259c6b6a65d73c895045f76a93719aa10cd93b3de100c"}, - {file = "websockets-14.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:647b573f7d3ada919fd60e64d533409a79dcf1ea21daeb4542d1d996519ca967"}, - {file = "websockets-14.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6af99a38e49f66be5a64b1e890208ad026cda49355661549c507152113049990"}, - {file = "websockets-14.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:091ab63dfc8cea748cc22c1db2814eadb77ccbf82829bac6b2fbe3401d548eda"}, - {file = "websockets-14.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b374e8953ad477d17e4851cdc66d83fdc2db88d9e73abf755c94510ebddceb95"}, - {file = "websockets-14.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a39d7eceeea35db85b85e1169011bb4321c32e673920ae9c1b6e0978590012a3"}, - {file = "websockets-14.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0a6f3efd47ffd0d12080594f434faf1cd2549b31e54870b8470b28cc1d3817d9"}, - {file = "websockets-14.2-cp312-cp312-musllinux_1_2_i686.whl", hash 
= "sha256:065ce275e7c4ffb42cb738dd6b20726ac26ac9ad0a2a48e33ca632351a737267"}, - {file = "websockets-14.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e9d0e53530ba7b8b5e389c02282f9d2aa47581514bd6049d3a7cffe1385cf5fe"}, - {file = "websockets-14.2-cp312-cp312-win32.whl", hash = "sha256:20e6dd0984d7ca3037afcb4494e48c74ffb51e8013cac71cf607fffe11df7205"}, - {file = "websockets-14.2-cp312-cp312-win_amd64.whl", hash = "sha256:44bba1a956c2c9d268bdcdf234d5e5ff4c9b6dc3e300545cbe99af59dda9dcce"}, - {file = "websockets-14.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6f1372e511c7409a542291bce92d6c83320e02c9cf392223272287ce55bc224e"}, - {file = "websockets-14.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4da98b72009836179bb596a92297b1a61bb5a830c0e483a7d0766d45070a08ad"}, - {file = "websockets-14.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8a86a269759026d2bde227652b87be79f8a734e582debf64c9d302faa1e9f03"}, - {file = "websockets-14.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86cf1aaeca909bf6815ea714d5c5736c8d6dd3a13770e885aafe062ecbd04f1f"}, - {file = "websockets-14.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9b0f6c3ba3b1240f602ebb3971d45b02cc12bd1845466dd783496b3b05783a5"}, - {file = "websockets-14.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669c3e101c246aa85bc8534e495952e2ca208bd87994650b90a23d745902db9a"}, - {file = "websockets-14.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eabdb28b972f3729348e632ab08f2a7b616c7e53d5414c12108c29972e655b20"}, - {file = "websockets-14.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2066dc4cbcc19f32c12a5a0e8cc1b7ac734e5b64ac0a325ff8353451c4b15ef2"}, - {file = "websockets-14.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ab95d357cd471df61873dadf66dd05dd4709cae001dd6342edafc8dc6382f307"}, - {file = "websockets-14.2-cp313-cp313-win32.whl", hash = "sha256:a9e72fb63e5f3feacdcf5b4ff53199ec8c18d66e325c34ee4c551ca748623bbc"}, - {file = "websockets-14.2-cp313-cp313-win_amd64.whl", hash = "sha256:b439ea828c4ba99bb3176dc8d9b933392a2413c0f6b149fdcba48393f573377f"}, - {file = "websockets-14.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7cd5706caec1686c5d233bc76243ff64b1c0dc445339bd538f30547e787c11fe"}, - {file = "websockets-14.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ec607328ce95a2f12b595f7ae4c5d71bf502212bddcea528290b35c286932b12"}, - {file = "websockets-14.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:da85651270c6bfb630136423037dd4975199e5d4114cae6d3066641adcc9d1c7"}, - {file = "websockets-14.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3ecadc7ce90accf39903815697917643f5b7cfb73c96702318a096c00aa71f5"}, - {file = "websockets-14.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1979bee04af6a78608024bad6dfcc0cc930ce819f9e10342a29a05b5320355d0"}, - {file = "websockets-14.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2dddacad58e2614a24938a50b85969d56f88e620e3f897b7d80ac0d8a5800258"}, - {file = "websockets-14.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:89a71173caaf75fa71a09a5f614f450ba3ec84ad9fca47cb2422a860676716f0"}, - {file = "websockets-14.2-cp39-cp39-musllinux_1_2_i686.whl", hash = 
"sha256:6af6a4b26eea4fc06c6818a6b962a952441e0e39548b44773502761ded8cc1d4"}, - {file = "websockets-14.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:80c8efa38957f20bba0117b48737993643204645e9ec45512579132508477cfc"}, - {file = "websockets-14.2-cp39-cp39-win32.whl", hash = "sha256:2e20c5f517e2163d76e2729104abc42639c41cf91f7b1839295be43302713661"}, - {file = "websockets-14.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4c8cef610e8d7c70dea92e62b6814a8cd24fbd01d7103cc89308d2bfe1659ef"}, - {file = "websockets-14.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d7d9cafbccba46e768be8a8ad4635fa3eae1ffac4c6e7cb4eb276ba41297ed29"}, - {file = "websockets-14.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:c76193c1c044bd1e9b3316dcc34b174bbf9664598791e6fb606d8d29000e070c"}, - {file = "websockets-14.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd475a974d5352390baf865309fe37dec6831aafc3014ffac1eea99e84e83fc2"}, - {file = "websockets-14.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2c6c0097a41968b2e2b54ed3424739aab0b762ca92af2379f152c1aef0187e1c"}, - {file = "websockets-14.2-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7ff794c8b36bc402f2e07c0b2ceb4a2424147ed4785ff03e2a7af03711d60a"}, - {file = "websockets-14.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:dec254fcabc7bd488dab64846f588fc5b6fe0d78f641180030f8ea27b76d72c3"}, - {file = "websockets-14.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:bbe03eb853e17fd5b15448328b4ec7fb2407d45fb0245036d06a3af251f8e48f"}, - {file = "websockets-14.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a3c4aa3428b904d5404a0ed85f3644d37e2cb25996b7f096d77caeb0e96a3b42"}, - {file = "websockets-14.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:577a4cebf1ceaf0b65ffc42c54856214165fb8ceeba3935852fc33f6b0c55e7f"}, - {file = "websockets-14.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad1c1d02357b7665e700eca43a31d52814ad9ad9b89b58118bdabc365454b574"}, - {file = "websockets-14.2-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f390024a47d904613577df83ba700bd189eedc09c57af0a904e5c39624621270"}, - {file = "websockets-14.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3c1426c021c38cf92b453cdf371228d3430acd775edee6bac5a4d577efc72365"}, - {file = "websockets-14.2-py3-none-any.whl", hash = "sha256:7a6ceec4ea84469f15cf15807a747e9efe57e369c384fa86e022b3bea679b79b"}, - {file = "websockets-14.2.tar.gz", hash = "sha256:5059ed9c54945efb321f097084b4c7e52c246f2c869815876a69d1efc4ad6eb5"}, + {file = "websockets-15.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5e6ee18a53dd5743e6155b8ff7e8e477c25b29b440f87f65be8165275c87fef0"}, + {file = "websockets-15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ee06405ea2e67366a661ed313e14cf2a86e84142a3462852eb96348f7219cee3"}, + {file = "websockets-15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8711682a629bbcaf492f5e0af72d378e976ea1d127a2d47584fa1c2c080b436b"}, + {file = "websockets-15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94c4a9b01eede952442c088d415861b0cf2053cbd696b863f6d5022d4e4e2453"}, + {file = 
"websockets-15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:45535fead66e873f411c1d3cf0d3e175e66f4dd83c4f59d707d5b3e4c56541c4"}, + {file = "websockets-15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e389efe46ccb25a1f93d08c7a74e8123a2517f7b7458f043bd7529d1a63ffeb"}, + {file = "websockets-15.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:67a04754d121ea5ca39ddedc3f77071651fb5b0bc6b973c71c515415b44ed9c5"}, + {file = "websockets-15.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:bd66b4865c8b853b8cca7379afb692fc7f52cf898786537dfb5e5e2d64f0a47f"}, + {file = "websockets-15.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a4cc73a6ae0a6751b76e69cece9d0311f054da9b22df6a12f2c53111735657c8"}, + {file = "websockets-15.0-cp310-cp310-win32.whl", hash = "sha256:89da58e4005e153b03fe8b8794330e3f6a9774ee9e1c3bd5bc52eb098c3b0c4f"}, + {file = "websockets-15.0-cp310-cp310-win_amd64.whl", hash = "sha256:4ff380aabd7a74a42a760ee76c68826a8f417ceb6ea415bd574a035a111fd133"}, + {file = "websockets-15.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:dd24c4d256558429aeeb8d6c24ebad4e982ac52c50bc3670ae8646c181263965"}, + {file = "websockets-15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f83eca8cbfd168e424dfa3b3b5c955d6c281e8fc09feb9d870886ff8d03683c7"}, + {file = "websockets-15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4095a1f2093002c2208becf6f9a178b336b7572512ee0a1179731acb7788e8ad"}, + {file = "websockets-15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb915101dfbf318486364ce85662bb7b020840f68138014972c08331458d41f3"}, + {file = "websockets-15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:45d464622314973d78f364689d5dbb9144e559f93dca11b11af3f2480b5034e1"}, + {file = "websockets-15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ace960769d60037ca9625b4c578a6f28a14301bd2a1ff13bb00e824ac9f73e55"}, + {file = "websockets-15.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c7cd4b1015d2f60dfe539ee6c95bc968d5d5fad92ab01bb5501a77393da4f596"}, + {file = "websockets-15.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4f7290295794b5dec470867c7baa4a14182b9732603fd0caf2a5bf1dc3ccabf3"}, + {file = "websockets-15.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3abd670ca7ce230d5a624fd3d55e055215d8d9b723adee0a348352f5d8d12ff4"}, + {file = "websockets-15.0-cp311-cp311-win32.whl", hash = "sha256:110a847085246ab8d4d119632145224d6b49e406c64f1bbeed45c6f05097b680"}, + {file = "websockets-15.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7bbbe2cd6ed80aceef2a14e9f1c1b61683194c216472ed5ff33b700e784e37"}, + {file = "websockets-15.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cccc18077acd34c8072578394ec79563664b1c205f7a86a62e94fafc7b59001f"}, + {file = "websockets-15.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d4c22992e24f12de340ca5f824121a5b3e1a37ad4360b4e1aaf15e9d1c42582d"}, + {file = "websockets-15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1206432cc6c644f6fc03374b264c5ff805d980311563202ed7fef91a38906276"}, + {file = "websockets-15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d3cc75ef3e17490042c47e0523aee1bcc4eacd2482796107fd59dd1100a44bc"}, + {file = 
"websockets-15.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b89504227a5311610e4be16071465885a0a3d6b0e82e305ef46d9b064ce5fb72"}, + {file = "websockets-15.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56e3efe356416bc67a8e093607315951d76910f03d2b3ad49c4ade9207bf710d"}, + {file = "websockets-15.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0f2205cdb444a42a7919690238fb5979a05439b9dbb73dd47c863d39640d85ab"}, + {file = "websockets-15.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:aea01f40995fa0945c020228ab919b8dfc93fc8a9f2d3d705ab5b793f32d9e99"}, + {file = "websockets-15.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9f8e33747b1332db11cf7fcf4a9512bef9748cb5eb4d3f7fbc8c30d75dc6ffc"}, + {file = "websockets-15.0-cp312-cp312-win32.whl", hash = "sha256:32e02a2d83f4954aa8c17e03fe8ec6962432c39aca4be7e8ee346b05a3476904"}, + {file = "websockets-15.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffc02b159b65c05f2ed9ec176b715b66918a674bd4daed48a9a7a590dd4be1aa"}, + {file = "websockets-15.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d2244d8ab24374bed366f9ff206e2619345f9cd7fe79aad5225f53faac28b6b1"}, + {file = "websockets-15.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3a302241fbe825a3e4fe07666a2ab513edfdc6d43ce24b79691b45115273b5e7"}, + {file = "websockets-15.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:10552fed076757a70ba2c18edcbc601c7637b30cdfe8c24b65171e824c7d6081"}, + {file = "websockets-15.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c53f97032b87a406044a1c33d1e9290cc38b117a8062e8a8b285175d7e2f99c9"}, + {file = "websockets-15.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1caf951110ca757b8ad9c4974f5cac7b8413004d2f29707e4d03a65d54cedf2b"}, + {file = "websockets-15.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bf1ab71f9f23b0a1d52ec1682a3907e0c208c12fef9c3e99d2b80166b17905f"}, + {file = "websockets-15.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bfcd3acc1a81f106abac6afd42327d2cf1e77ec905ae11dc1d9142a006a496b6"}, + {file = "websockets-15.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c8c5c8e1bac05ef3c23722e591ef4f688f528235e2480f157a9cfe0a19081375"}, + {file = "websockets-15.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:86bfb52a9cfbcc09aba2b71388b0a20ea5c52b6517c0b2e316222435a8cdab72"}, + {file = "websockets-15.0-cp313-cp313-win32.whl", hash = "sha256:26ba70fed190708551c19a360f9d7eca8e8c0f615d19a574292b7229e0ae324c"}, + {file = "websockets-15.0-cp313-cp313-win_amd64.whl", hash = "sha256:ae721bcc8e69846af00b7a77a220614d9b2ec57d25017a6bbde3a99473e41ce8"}, + {file = "websockets-15.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c348abc5924caa02a62896300e32ea80a81521f91d6db2e853e6b1994017c9f6"}, + {file = "websockets-15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5294fcb410ed0a45d5d1cdedc4e51a60aab5b2b3193999028ea94afc2f554b05"}, + {file = "websockets-15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c24ba103ecf45861e2e1f933d40b2d93f5d52d8228870c3e7bf1299cd1cb8ff1"}, + {file = "websockets-15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc8821a03bcfb36e4e4705316f6b66af28450357af8a575dc8f4b09bf02a3dee"}, + {file = 
"websockets-15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc5ae23ada6515f31604f700009e2df90b091b67d463a8401c1d8a37f76c1d7"}, + {file = "websockets-15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ac67b542505186b3bbdaffbc303292e1ee9c8729e5d5df243c1f20f4bb9057e"}, + {file = "websockets-15.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c86dc2068f1c5ca2065aca34f257bbf4f78caf566eb230f692ad347da191f0a1"}, + {file = "websockets-15.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:30cff3ef329682b6182c01c568f551481774c476722020b8f7d0daacbed07a17"}, + {file = "websockets-15.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:98dcf978d4c6048965d1762abd534c9d53bae981a035bfe486690ba11f49bbbb"}, + {file = "websockets-15.0-cp39-cp39-win32.whl", hash = "sha256:37d66646f929ae7c22c79bc73ec4074d6db45e6384500ee3e0d476daf55482a9"}, + {file = "websockets-15.0-cp39-cp39-win_amd64.whl", hash = "sha256:24d5333a9b2343330f0f4eb88546e2c32a7f5c280f8dd7d3cc079beb0901781b"}, + {file = "websockets-15.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b499caef4bca9cbd0bd23cd3386f5113ee7378094a3cb613a2fa543260fe9506"}, + {file = "websockets-15.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:17f2854c6bd9ee008c4b270f7010fe2da6c16eac5724a175e75010aacd905b31"}, + {file = "websockets-15.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89f72524033abbfde880ad338fd3c2c16e31ae232323ebdfbc745cbb1b3dcc03"}, + {file = "websockets-15.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1657a9eecb29d7838e3b415458cc494e6d1b194f7ac73a34aa55c6fb6c72d1f3"}, + {file = "websockets-15.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e413352a921f5ad5d66f9e2869b977e88d5103fc528b6deb8423028a2befd842"}, + {file = "websockets-15.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8561c48b0090993e3b2a54db480cab1d23eb2c5735067213bb90f402806339f5"}, + {file = "websockets-15.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:190bc6ef8690cd88232a038d1b15714c258f79653abad62f7048249b09438af3"}, + {file = "websockets-15.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:327adab7671f3726b0ba69be9e865bba23b37a605b585e65895c428f6e47e766"}, + {file = "websockets-15.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bd8ef197c87afe0a9009f7a28b5dc613bfc585d329f80b7af404e766aa9e8c7"}, + {file = "websockets-15.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:789c43bf4a10cd067c24c321238e800b8b2716c863ddb2294d2fed886fa5a689"}, + {file = "websockets-15.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7394c0b7d460569c9285fa089a429f58465db930012566c03046f9e3ab0ed181"}, + {file = "websockets-15.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ea4f210422b912ebe58ef0ad33088bc8e5c5ff9655a8822500690abc3b1232d"}, + {file = "websockets-15.0-py3-none-any.whl", hash = "sha256:51ffd53c53c4442415b613497a34ba0aa7b99ac07f1e4a62db5dcd640ae6c3c3"}, + {file = "websockets-15.0.tar.gz", hash = "sha256:ca36151289a15b39d8d683fd8b7abbe26fc50be311066c5f8dcf3cb8cee107ab"}, ] [[package]] @@ -2605,4 +2548,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" 
python-versions = "^3.10" -content-hash = "d0e70b99ef8db156e83d9c7cf4045df90d299b8aff8099ac70dbf63ba0327979" +content-hash = "38d461f9c341e81b1034d0b3d789f39a5b6cb7c5fe83dbf3845e334e8c93d9a2" diff --git a/pyproject.toml b/pyproject.toml index 89ab600fdc..bb516baa17 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,8 +7,8 @@ requires-poetry = ">=2.0.0" [tool.poetry.dependencies] python = "^3.10" -ops = "^2.17.1" -boto3 = "^1.35.70" +ops = "^2.18.1" +boto3 = "^1.35.99" pgconnstr = "^1.0.1" requests = "^2.32.3" tenacity = "^9.0.0" @@ -27,7 +27,7 @@ poetry-core = "*" # tempo_coordinator_k8s/v0/tracing.py requires pydantic pydantic = "^1.10" # loki_k8s/v1/loki_push_api.py and prometheus_k8s/v0/prometheus_scrape.py -cosl = "*" +cosl = ">=0.0.50" # tempo_coordinator_k8s/v0/charm_tracing.py opentelemetry-exporter-otlp-proto-http = "1.21.0" # tls_certificates_interface/v2/tls_certificates.py @@ -38,19 +38,19 @@ jsonschema = "*" optional = true [tool.poetry.group.format.dependencies] -ruff = "^0.9.2" +ruff = "^0.9.6" [tool.poetry.group.lint] optional = true [tool.poetry.group.lint.dependencies] -codespell = "^2.4.0" +codespell = "^2.4.1" [tool.poetry.group.unit] optional = true [tool.poetry.group.unit.dependencies] -coverage = {extras = ["toml"], version = "^7.6.10"} +coverage = {extras = ["toml"], version = "^7.6.12"} pytest = "^8.3.4" [tool.poetry.group.integration] @@ -59,11 +59,8 @@ optional = true [tool.poetry.group.integration.dependencies] lightkube = "^0.17.1" pytest = "^8.3.4" -pytest-github-secrets = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.0.5", subdirectory = "python/pytest_plugins/github_secrets"} -pytest-operator = "^0.39.0" -pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.0.5", subdirectory = "python/pytest_plugins/pytest_operator_cache"} -pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.0.5", subdirectory = "python/pytest_plugins/pytest_operator_groups"} -allure-pytest-collection-report = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.0.5", subdirectory = "python/pytest_plugins/allure_pytest_collection_report"} +pytest-operator = "^0.40.0" +allure-pytest-default-results = "^0.1.2" # renovate caret doesn't work: https://github.com/renovatebot/renovate/issues/26940 juju = "<=3.6.1.0" psycopg2-binary = "^2.9.10" @@ -89,7 +86,7 @@ exclude_lines = [ minversion = "6.0" log_cli_level = "INFO" asyncio_mode = "auto" -markers = ["unstable", "juju2", "juju3", "juju_secrets"] +markers = ["juju3", "juju_secrets"] # Formatting tools configuration [tool.black] diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 0000000000..e1b55ab3b2 --- /dev/null +++ b/scripts/__init__.py @@ -0,0 +1,4 @@ +# Copyright 2025 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Charm script utilities.""" diff --git a/src/rotate_logs.py b/scripts/rotate_logs.py similarity index 93% rename from src/rotate_logs.py rename to scripts/rotate_logs.py index b19e935573..cb6a3f441b 100644 --- a/src/rotate_logs.py +++ b/scripts/rotate_logs.py @@ -1,4 +1,4 @@ -# Copyright 2024 Canonical Ltd. +# Copyright 2025 Canonical Ltd. # See LICENSE file for licensing details. 
"""Service for rotating logs.""" diff --git a/spread.yaml b/spread.yaml new file mode 100644 index 0000000000..8417539c37 --- /dev/null +++ b/spread.yaml @@ -0,0 +1,125 @@ +project: postgresql-k8s-operator + +backends: + # Derived from https://github.com/jnsgruk/zinc-k8s-operator/blob/a21eae8399eb3b9df4ddb934b837af25ef831976/spread.yaml#L11 + lxd-vm: + # TODO: remove after https://github.com/canonical/spread/pull/185 merged & in charmcraft + type: adhoc + allocate: | + hash=$(python3 -c "import hashlib; print(hashlib.sha256('$SPREAD_PASSWORD'.encode()).hexdigest()[:6])") + VM_NAME="${VM_NAME:-${SPREAD_SYSTEM//./-}-${hash}}" + DISK="${DISK:-20}" + CPU="${CPU:-4}" + MEM="${MEM:-8}" + + cloud_config="#cloud-config + ssh_pwauth: true + users: + - default + - name: runner + plain_text_passwd: $SPREAD_PASSWORD + lock_passwd: false + sudo: ALL=(ALL) NOPASSWD:ALL + " + + lxc launch --vm \ + "${SPREAD_SYSTEM//-/:}" \ + "${VM_NAME}" \ + -c user.user-data="${cloud_config}" \ + -c limits.cpu="${CPU}" \ + -c limits.memory="${MEM}GiB" \ + -d root,size="${DISK}GiB" + + # Wait for the runner user + while ! lxc exec "${VM_NAME}" -- id -u runner &>/dev/null; do sleep 0.5; done + + # Set the instance address for spread + ADDRESS "$(lxc ls -f csv | grep "${VM_NAME}" | cut -d"," -f3 | cut -d" " -f1)" + discard: | + hash=$(python3 -c "import hashlib; print(hashlib.sha256('$SPREAD_PASSWORD'.encode()).hexdigest()[:6])") + VM_NAME="${VM_NAME:-${SPREAD_SYSTEM//./-}-${hash}}" + lxc delete --force "${VM_NAME}" + environment: + CONCIERGE_EXTRA_SNAPS: charmcraft + CONCIERGE_EXTRA_DEBS: pipx + systems: + - ubuntu-24.04: + username: runner + prepare: | + systemctl disable --now unattended-upgrades.service + systemctl mask unattended-upgrades.service + pipx install charmcraftcache + cd "$SPREAD_PATH" + charmcraftcache pack -v + restore-each: | + cd "$SPREAD_PATH" + # Revert python-libjuju version override + git restore pyproject.toml poetry.lock + + # Use instead of `concierge restore` to save time between tests + # For example, with microk8s, using `concierge restore` takes twice as long as this (e.g. 
6 + # min instead of 3 min between every spread job) + juju destroy-model --force --no-wait --destroy-storage --no-prompt testing + juju kill-controller --no-prompt concierge-microk8s + restore: | + rm -rf "$SPREAD_PATH" + + github-ci: + type: adhoc + # Only run on CI + manual: true + # HACK: spread requires runners to be accessible via SSH + # Configure local sshd & instruct spread to connect to the same machine spread is running on + # (spread cannot provision GitHub Actions runners, so we provision a GitHub Actions runner for + # each spread job & select a single job when running spread) + # Derived from https://github.com/jnsgruk/zinc-k8s-operator/blob/a21eae8399eb3b9df4ddb934b837af25ef831976/spread.yaml#L47 + allocate: | + sudo tee /etc/ssh/sshd_config.d/10-spread-github-ci.conf << 'EOF' + PasswordAuthentication yes + PermitEmptyPasswords yes + EOF + + ADDRESS localhost + # HACK: spread does not pass environment variables set on runner + # Manually pass specific environment variables + environment: + CI: '$(HOST: echo $CI)' + AWS_ACCESS_KEY: '$(HOST: echo $AWS_ACCESS_KEY)' + AWS_SECRET_KEY: '$(HOST: echo $AWS_SECRET_KEY)' + GCP_ACCESS_KEY: '$(HOST: echo $GCP_ACCESS_KEY)' + GCP_SECRET_KEY: '$(HOST: echo $GCP_SECRET_KEY)' + systems: + - ubuntu-24.04: + username: runner + - ubuntu-24.04-arm: + username: runner + +suites: + tests/spread/: + summary: Spread tests + +path: /root/spread_project + +kill-timeout: 3h +environment: + PATH: $PATH:$(pipx environment --value PIPX_BIN_DIR) + CONCIERGE_JUJU_CHANNEL/juju36: 3.6/stable +prepare: | + snap refresh --hold + chown -R root:root "$SPREAD_PATH" + cd "$SPREAD_PATH" + snap install --classic concierge + + # Install charmcraft & pipx (on lxd-vm backend) + concierge prepare --trace + + pipx install tox poetry +prepare-each: | + cd "$SPREAD_PATH" + # `concierge prepare` needs to be run for each spread job in case Juju version changed + concierge prepare --trace + + # Unable to set constraint on all models because of Juju bug: + # https://bugs.launchpad.net/juju/+bug/2065050 + juju set-model-constraints arch="$(dpkg --print-architecture)" +# Only restore on lxd backend—no need to restore on CI diff --git a/src/backups.py b/src/backups.py index eb75df42b1..2bdc776374 100644 --- a/src/backups.py +++ b/src/backups.py @@ -187,7 +187,10 @@ def can_use_s3_repository(self) -> tuple[bool, str | None]: for line in system_identifier_from_instance.splitlines() if "Database system identifier" in line ).split(" ")[-1] - system_identifier_from_stanza = str(stanza.get("db")[0]["system-id"]) + stanza_dbs = stanza.get("db") + system_identifier_from_stanza = ( + str(stanza_dbs[0]["system-id"]) if len(stanza_dbs) else None + ) if system_identifier_from_instance != system_identifier_from_stanza: logger.debug( f"can_use_s3_repository: incompatible system identifier s3={system_identifier_from_stanza}, local={system_identifier_from_instance}" @@ -669,7 +672,7 @@ def _on_s3_credentials_checks(self, event: CredentialsChangedEvent) -> bool: logger.debug("_on_s3_credential_changed early exit: no connection info") return False - if "cluster_initialised" not in self.charm.app_peer_data: + if not self.charm.is_cluster_initialised: logger.debug("Cannot set pgBackRest configurations, PostgreSQL has not yet started.") event.defer() return False @@ -681,10 +684,7 @@ def _on_s3_credentials_checks(self, event: CredentialsChangedEvent) -> bool: return False # Prevents S3 change in the middle of restoring backup and patroni / pgbackrest errors caused by that. 
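+        # Deferring (rather than dropping) the event lets ops re-emit it on the
+        # next dispatch, so the S3 reconfiguration is retried automatically once
+        # the restore finishes.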
- if ( - "restoring-backup" in self.charm.app_peer_data - or "restore-to-time" in self.charm.app_peer_data - ): + if self.charm.is_cluster_restoring_backup or self.charm.is_cluster_restoring_to_time: logger.info("Cannot change S3 configuration during restore") event.defer() return False @@ -1014,7 +1014,7 @@ def _on_restore_action(self, event): # noqa: C901 ) except ApiError as e: # If previous PITR restore was unsuccessful, there are no such endpoints. - if "restore-to-time" not in self.charm.app_peer_data: + if not self.charm.is_cluster_restoring_to_time: error_message = f"Failed to remove previous cluster information with error: {e!s}" logger.error(f"Restore failed: {error_message}") event.fail(error_message) @@ -1208,7 +1208,7 @@ def _render_pgbackrest_conf_file(self) -> bool: with open("templates/pgbackrest.logrotate.j2") as file: template = Template(file.read()) self.container.push(PGBACKREST_LOGROTATE_FILE, template.render()) - with open("src/rotate_logs.py") as f: + with open("scripts/rotate_logs.py") as f: self.container.push( "/home/postgres/rotate_logs.py", f.read(), diff --git a/src/charm.py b/src/charm.py index a77f2790df..ea4068c117 100755 --- a/src/charm.py +++ b/src/charm.py @@ -34,7 +34,6 @@ from charms.data_platform_libs.v0.data_models import TypedCharmBase from charms.grafana_k8s.v0.grafana_dashboard import GrafanaDashboardProvider from charms.loki_k8s.v1.loki_push_api import LogProxyConsumer -from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch from charms.postgresql_k8s.v0.postgresql import ( REQUIRED_PLUGINS, PostgreSQL, @@ -51,7 +50,7 @@ from lightkube.models.core_v1 import ServicePort, ServiceSpec from lightkube.models.meta_v1 import ObjectMeta from lightkube.resources.core_v1 import Endpoints, Node, Pod, Service -from ops import main +from ops import JujuVersion, main from ops.charm import ( ActionEvent, HookEvent, @@ -59,7 +58,6 @@ RelationDepartedEvent, WorkloadEvent, ) -from ops.jujuversion import JujuVersion from ops.model import ( ActiveStatus, BlockedStatus, @@ -88,6 +86,7 @@ from constants import ( APP_SCOPE, BACKUP_USER, + DATABASE_DEFAULT_NAME, METRICS_PORT, MONITORING_PASSWORD_KEY, MONITORING_USER, @@ -114,13 +113,12 @@ WORKLOAD_OS_GROUP, WORKLOAD_OS_USER, ) -from patroni import NotReadyError, Patroni, SwitchoverFailedError +from patroni import NotReadyError, Patroni, SwitchoverFailedError, SwitchoverNotSyncError from relations.async_replication import ( REPLICATION_CONSUMER_RELATION, REPLICATION_OFFER_RELATION, PostgreSQLAsyncReplication, ) -from relations.db import EXTENSIONS_BLOCKING_MESSAGE, DbProvides from relations.postgresql_provider import PostgreSQLProvider from upgrade import PostgreSQLUpgrade, get_postgresql_k8s_dependencies_model from utils import any_cpu_to_cores, any_memory_to_bytes, new_password @@ -148,7 +146,6 @@ class CannotConnectError(Exception): @trace_charm( tracing_endpoint="tracing_endpoint", extra_types=( - DbProvides, GrafanaDashboardProvider, LogProxyConsumer, MetricsEndpointProvider, @@ -213,6 +210,7 @@ def __init__(self, *args): self.framework.observe(self.on.stop, self._on_stop) self.framework.observe(self.on.get_password_action, self._on_get_password) self.framework.observe(self.on.set_password_action, self._on_set_password) + self.framework.observe(self.on.promote_to_primary_action, self._on_promote_to_primary) self.framework.observe(self.on.get_primary_action, self._on_get_primary) self.framework.observe(self.on.update_status, self._on_update_status) self._storage_path = 
self.meta.storages["pgdata"].location @@ -226,8 +224,6 @@ def __init__(self, *args): ) self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm) self.postgresql_client_relation = PostgreSQLProvider(self) - self.legacy_db_relation = DbProvides(self, admin=False) - self.legacy_db_admin_relation = DbProvides(self, admin=True) self.backup = PostgreSQLBackups(self, "s3-parameters") self.tls = PostgreSQLTLS(self, PEER, [self.primary_endpoint, self.replicas_endpoint]) self.async_replication = PostgreSQLAsyncReplication(self) @@ -246,9 +242,10 @@ def __init__(self, *args): relation_name="logging", ) - postgresql_db_port = ServicePort(5432, name="database") - patroni_api_port = ServicePort(8008, name="api") - self.service_patcher = KubernetesServicePatch(self, [postgresql_db_port, patroni_api_port]) + try: + self.unit.set_ports(5432, 8008) + except ModelError: + logger.exception("failed to open port") self.tracing = TracingEndpointRequirer( self, relation_name=TRACING_RELATION_NAME, protocols=[TRACING_PROTOCOL] ) @@ -262,8 +259,6 @@ def tracing_endpoint(self) -> str | None: @property def _pebble_log_forwarding_supported(self) -> bool: # https://github.com/canonical/operator/issues/1230 - from ops.jujuversion import JujuVersion - juju_version = JujuVersion.from_environ() return juju_version > JujuVersion(version="3.3") @@ -334,8 +329,6 @@ def peer_relation_data(self, scope: Scopes) -> DataPeerData: def _translate_field_to_secret_key(self, key: str) -> str: """Change 'key' to secrets-compatible key field.""" - if not JujuVersion.from_environ().has_secrets: - return key key = SECRET_KEY_OVERRIDES.get(key, key) new_key = key.replace("_", "-") return new_key.strip("-") @@ -349,10 +342,6 @@ def get_secret(self, scope: Scopes, key: str) -> str | None: return None secret_key = self._translate_field_to_secret_key(key) - # Old translation in databag is to be taken - if result := self.peer_relation_data(scope).fetch_my_relation_field(peers.id, key): - return result - return self.peer_relation_data(scope).get_secret(peers.id, secret_key) def set_secret(self, scope: Scopes, key: str, value: str | None) -> str | None: @@ -367,8 +356,6 @@ def set_secret(self, scope: Scopes, key: str, value: str | None) -> str | None: return None secret_key = self._translate_field_to_secret_key(key) - # Old translation in databag is to be deleted - self.scoped_peer_data(scope).pop(key, None) self.peer_relation_data(scope).set_secret(peers.id, secret_key, value) def remove_secret(self, scope: Scopes, key: str) -> None: @@ -388,6 +375,26 @@ def is_cluster_initialised(self) -> bool: """Returns whether the cluster is already initialised.""" return "cluster_initialised" in self.app_peer_data + @property + def is_cluster_restoring_backup(self) -> bool: + """Returns whether the cluster is restoring a backup.""" + return "restoring-backup" in self.app_peer_data + + @property + def is_cluster_restoring_to_time(self) -> bool: + """Returns whether the cluster is restoring a backup to a specific time.""" + return "restore-to-time" in self.app_peer_data + + @property + def is_unit_departing(self) -> bool: + """Returns whether the unit is departing.""" + return "departing" in self.unit_peer_data + + @property + def is_unit_stopped(self) -> bool: + """Returns whether the unit is stopped.""" + return "stopped" in self.unit_peer_data + @property def postgresql(self) -> PostgreSQL: """Returns an instance of the object used to interact with the database.""" @@ -396,7 +403,7 @@ def postgresql(self) -> PostgreSQL: 
current_host=self.endpoint, user=USER, password=self.get_secret(APP_SCOPE, f"{USER}-password"), - database="postgres", + database=DATABASE_DEFAULT_NAME, system_users=SYSTEM_USERS, ) @@ -450,13 +457,22 @@ def get_unit_ip(self, unit: Unit) -> str | None: else: return None + def updated_synchronous_node_count(self) -> bool: + """Tries to update synchronous_node_count configuration and reports the result.""" + try: + self._patroni.update_synchronous_node_count() + return True + except RetryError: + logger.debug("Unable to set synchronous_node_count") + return False + def _on_peer_relation_departed(self, event: RelationDepartedEvent) -> None: """The leader removes the departing units from the list of cluster members.""" # Allow leader to update endpoints if it isn't leaving. if not self.unit.is_leader() or event.departing_unit == self.unit: return - if "cluster_initialised" not in self._peers.data[self.app]: + if not self.is_cluster_initialised or not self.updated_synchronous_node_count(): logger.debug( "Deferring on_peer_relation_departed: Cluster must be initialized before members can leave" ) @@ -521,7 +537,7 @@ def _on_peer_relation_changed(self, event: HookEvent) -> None: # noqa: C901 """Reconfigure cluster members.""" # The cluster must be initialized first in the leader unit # before any other member joins the cluster. - if "cluster_initialised" not in self._peers.data[self.app]: + if not self.is_cluster_initialised: if self.unit.is_leader(): if self._initialize_cluster(event): logger.debug("Deferring on_peer_relation_changed: Leader initialized cluster") @@ -566,7 +582,7 @@ def _on_peer_relation_changed(self, event: HookEvent) -> None: # noqa: C901 services = container.pebble.get_services(names=[self._postgresql_service]) if ( - ("restoring-backup" in self.app_peer_data or "restore-to-time" in self.app_peer_data) + (self.is_cluster_restoring_backup or self.is_cluster_restoring_to_time) and len(services) > 0 and not self._was_restore_successful(container, services[0]) ): @@ -660,6 +676,10 @@ def _on_config_changed(self, event) -> None: self.unit.status = BlockedStatus("Configuration Error. Please check the logs") logger.error("Invalid configuration: %s", str(e)) return + if not self.updated_synchronous_node_count(): + logger.debug("Defer on_config_changed: unable to set synchronous node count") + event.defer() + return if self.is_blocked and "Configuration Error" in self.unit.status.message: self._set_active_status() @@ -673,21 +693,6 @@ def _on_config_changed(self, event) -> None: # Enable and/or disable the extensions. self.enable_disable_extensions() - # Unblock the charm after extensions are enabled (only if it's blocked due to application - # charms requesting extensions). - if self.unit.status.message != EXTENSIONS_BLOCKING_MESSAGE: - return - - for relation in [ - *self.model.relations.get("db", []), - *self.model.relations.get("db-admin", []), - ]: - if not self.legacy_db_relation.set_up_relation(relation): - logger.debug( - "Early exit on_config_changed: legacy relation requested extensions that are still disabled" - ) - return - def enable_disable_extensions(self, database: str | None = None) -> None: """Enable/disable PostgreSQL extensions set through config options. @@ -769,7 +774,7 @@ def _add_members(self, event) -> None: # Reconfiguration can be successful only if the cluster is initialised # (i.e. first unit has bootstrap the cluster). 
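+        # "cluster_initialised" is set in the app peer databag (leader-only write)
+        # after the first unit has bootstrapped Patroni, so member reconfiguration
+        # is safely skipped until then.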
- if "cluster_initialised" not in self._peers.data[self.app]: + if not self.is_cluster_initialised: return try: @@ -783,6 +788,7 @@ def _add_members(self, event) -> None: for member in self._hosts - self._patroni.cluster_members: logger.debug("Adding %s to cluster", member) self.add_cluster_member(member) + self._patroni.update_synchronous_node_count() except NotReadyError: logger.info("Deferring reconfigure: another member doing sync right now") event.defer() @@ -941,7 +947,7 @@ def _on_postgresql_pebble_ready(self, event: WorkloadEvent) -> None: # Otherwise, each unit will create a different cluster and # any update in the members list on the units won't have effect # on fixing that. - if not self.unit.is_leader() and "cluster_initialised" not in self._peers.data[self.app]: + if not self.unit.is_leader() and not self.is_cluster_initialised: logger.debug( "Deferring on_postgresql_pebble_ready: Not leader and cluster not initialized" ) @@ -993,10 +999,16 @@ def _set_active_status(self): self.app_peer_data["s3-initialization-block-message"] ) return - if self._patroni.get_primary(unit_name_pattern=True) == self.unit.name: - self.unit.status = ActiveStatus("Primary") - elif self.is_standby_leader: - self.unit.status = ActiveStatus("Standby") + if ( + self._patroni.get_primary(unit_name_pattern=True) == self.unit.name + or self.is_standby_leader + ): + danger_state = "" + if len(self._patroni.get_running_cluster_members()) < self.app.planned_units(): + danger_state = " (degraded)" + self.unit.status = ActiveStatus( + f"{'Standby' if self.is_standby_leader else 'Primary'}{danger_state}" + ) elif self._patroni.member_started: self.unit.status = ActiveStatus() except (RetryError, RequestsConnectionError) as e: @@ -1048,7 +1060,7 @@ def _initialize_cluster(self, event: WorkloadEvent) -> bool: self.postgresql.create_user( MONITORING_USER, self.get_secret(APP_SCOPE, MONITORING_PASSWORD_KEY), - extra_user_roles="pg_monitor", + extra_user_roles=["pg_monitor"], ) self.postgresql.set_up_database() @@ -1171,8 +1183,8 @@ def _has_non_restore_waiting_status(self) -> bool: """Returns whether the unit is in a waiting state and there is no restore process ongoing.""" return ( isinstance(self.unit.status, WaitingStatus) - and "restoring-backup" not in self.app_peer_data - and "restore-to-time" not in self.app_peer_data + and not self.is_cluster_restoring_backup + and not self.is_cluster_restoring_to_time ) def _on_get_password(self, event: ActionEvent) -> None: @@ -1268,6 +1280,26 @@ def _on_set_password(self, event: ActionEvent) -> None: event.set_results({"password": password}) + def _on_promote_to_primary(self, event: ActionEvent) -> None: + if event.params.get("scope") == "cluster": + return self.async_replication.promote_to_primary(event) + elif event.params.get("scope") == "unit": + return self.promote_primary_unit(event) + else: + event.fail("Scope should be either cluster or unit") + + def promote_primary_unit(self, event: ActionEvent) -> None: + """Handles promote to primary for unit scope.""" + if event.params.get("force"): + event.fail("Suprerfluous force flag with unit scope") + else: + try: + self._patroni.switchover(self.unit.name, wait=False) + except SwitchoverNotSyncError: + event.fail("Unit is not sync standby") + except SwitchoverFailedError: + event.fail("Switchover failed or timed out, check the logs for details") + def _on_get_primary(self, event: ActionEvent) -> None: """Get primary instance.""" try: @@ -1420,9 +1452,9 @@ def _on_update_status(self, _) -> None: return if ( - 
"restoring-backup" not in self.app_peer_data - and "restore-to-time" not in self.app_peer_data - and "stopped" not in self.unit_peer_data + not self.is_cluster_restoring_backup + and not self.is_cluster_restoring_to_time + and not self.is_unit_stopped and services[0].current != ServiceStatus.ACTIVE ): logger.warning( @@ -1438,7 +1470,7 @@ def _on_update_status(self, _) -> None: return if ( - "restoring-backup" in self.app_peer_data or "restore-to-time" in self.app_peer_data + self.is_cluster_restoring_backup or self.is_cluster_restoring_to_time ) and not self._was_restore_successful(container, services[0]): return @@ -1454,7 +1486,7 @@ def _on_update_status(self, _) -> None: def _was_restore_successful(self, container: Container, service: ServiceInfo) -> bool: """Checks if restore operation succeeded and S3 is properly configured.""" - if "restore-to-time" in self.app_peer_data and all(self.is_pitr_failed(container)): + if self.is_cluster_restoring_to_time and all(self.is_pitr_failed(container)): logger.error( "Restore failed: database service failed to reach point-in-time-recovery target. " "You can launch another restore with different parameters" @@ -1887,6 +1919,12 @@ def update_config(self, is_creating_backup: bool = False) -> bool: return True if not self._patroni.member_started: + if self.is_tls_enabled: + logger.debug( + "Early exit update_config: patroni not responding but TLS is enabled." + ) + self._handle_postgresql_restart_need() + return True logger.debug("Early exit update_config: Patroni not started yet") return False @@ -1937,6 +1975,14 @@ def _validate_config_options(self) -> None: if self.config.request_time_zone not in self.postgresql.get_postgresql_timezones(): raise ValueError("request_time_zone config option has an invalid value") + if ( + self.config.storage_default_table_access_method + not in self.postgresql.get_postgresql_default_table_access_methods() + ): + raise ValueError( + "storage_default_table_access_method config option has an invalid value" + ) + container = self.unit.get_container("postgresql") output, _ = container.exec(["locale", "-a"]).wait_output() locales = list(output.splitlines()) @@ -1949,8 +1995,14 @@ def _validate_config_options(self) -> None: def _handle_postgresql_restart_need(self): """Handle PostgreSQL restart need based on the TLS configuration and configuration changes.""" - restart_postgresql = self.is_tls_enabled != self.postgresql.is_tls_enabled() - self._patroni.reload_patroni_configuration() + if self._can_connect_to_postgresql: + restart_postgresql = self.is_tls_enabled != self.postgresql.is_tls_enabled() + else: + restart_postgresql = False + try: + self._patroni.reload_patroni_configuration() + except Exception as e: + logger.error(f"Reload patroni call failed! error: {e!s}") # Wait for some more time than the Patroni's loop_wait default value (10 seconds), # which tells how much time Patroni will wait before checking the configuration # file again to reload it. 
@@ -2080,11 +2132,7 @@ def on_deployed_without_trust(self) -> None: @property def client_relations(self) -> list[Relation]: """Return the list of established client relations.""" - relations = [] - for relation_name in ["database", "db", "db-admin"]: - for relation in self.model.relations.get(relation_name, []): - relations.append(relation) - return relations + return self.model.relations.get("database", []) def override_patroni_on_failure_condition( self, new_condition: str, repeat_cause: str | None diff --git a/src/config.py b/src/config.py index 61018420ad..f3b3bf4fc1 100644 --- a/src/config.py +++ b/src/config.py @@ -5,9 +5,10 @@ """Structured configuration for the PostgreSQL charm.""" import logging +from typing import Literal from charms.data_platform_libs.v0.data_models import BaseConfigModel -from pydantic import validator +from pydantic import PositiveInt, validator logger = logging.getLogger(__name__) @@ -15,91 +16,173 @@ class CharmConfig(BaseConfigModel): """Manager for the structured configuration.""" + synchronous_node_count: Literal["all", "majority"] | PositiveInt + connection_authentication_timeout: int | None + connection_statement_timeout: int | None + cpu_parallel_leader_participation: bool | None durability_synchronous_commit: str | None + experimental_max_connections: int | None instance_default_text_search_config: str | None instance_max_locks_per_transaction: int | None instance_password_encryption: str | None + instance_synchronize_seqscans: bool | None + logging_client_min_messages: str | None logging_log_connections: bool | None logging_log_disconnections: bool | None logging_log_lock_waits: bool | None logging_log_min_duration_statement: int | None + logging_track_functions: str | None memory_maintenance_work_mem: int | None memory_max_prepared_transactions: int | None memory_shared_buffers: int | None memory_temp_buffers: int | None memory_work_mem: int | None optimizer_constraint_exclusion: str | None + optimizer_cpu_index_tuple_cost: float | None + optimizer_cpu_operator_cost: float | None + optimizer_cpu_tuple_cost: float | None + optimizer_cursor_tuple_fraction: float | None optimizer_default_statistics_target: int | None + optimizer_enable_async_append: bool | None + optimizer_enable_bitmapscan: bool | None + optimizer_enable_gathermerge: bool | None + optimizer_enable_hashagg: bool | None + optimizer_enable_hashjoin: bool | None + optimizer_enable_incremental_sort: bool | None + optimizer_enable_indexonlyscan: bool | None + optimizer_enable_indexscan: bool | None + optimizer_enable_material: bool | None + optimizer_enable_memoize: bool | None + optimizer_enable_mergejoin: bool | None + optimizer_enable_nestloop: bool | None + optimizer_enable_parallel_append: bool | None + optimizer_enable_parallel_hash: bool | None + optimizer_enable_partition_pruning: bool | None + optimizer_enable_partitionwise_aggregate: bool | None + optimizer_enable_partitionwise_join: bool | None + optimizer_enable_seqscan: bool | None + optimizer_enable_sort: bool | None + optimizer_enable_tidscan: bool | None optimizer_from_collapse_limit: int | None + optimizer_geqo: bool | None + optimizer_geqo_effort: int | None + optimizer_geqo_generations: int | None + optimizer_geqo_pool_size: int | None + optimizer_geqo_seed: float | None + optimizer_geqo_selection_bias: float | None + optimizer_geqo_threshold: int | None + optimizer_jit: bool | None + optimizer_jit_above_cost: float | None + optimizer_jit_inline_above_cost: float | None + optimizer_jit_optimize_above_cost: float | None 
optimizer_join_collapse_limit: int | None - profile: str - profile_limit_memory: int | None + optimizer_min_parallel_index_scan_size: int | None + optimizer_min_parallel_table_scan_size: int | None + optimizer_parallel_setup_cost: float | None + optimizer_parallel_tuple_cost: float | None + plugin_address_standardizer_data_us_enable: bool + plugin_address_standardizer_enable: bool plugin_audit_enable: bool - plugin_citext_enable: bool - plugin_debversion_enable: bool - plugin_hstore_enable: bool - plugin_pg_trgm_enable: bool - plugin_plpython3u_enable: bool - plugin_unaccent_enable: bool plugin_bloom_enable: bool + plugin_bool_plperl_enable: bool plugin_btree_gin_enable: bool plugin_btree_gist_enable: bool + plugin_citext_enable: bool plugin_cube_enable: bool + plugin_debversion_enable: bool plugin_dict_int_enable: bool plugin_dict_xsyn_enable: bool plugin_earthdistance_enable: bool plugin_fuzzystrmatch_enable: bool + plugin_hll_enable: bool + plugin_hstore_enable: bool + plugin_hypopg_enable: bool + plugin_icu_ext_enable: bool plugin_intarray_enable: bool + plugin_ip4r_enable: bool plugin_isn_enable: bool + plugin_jsonb_plperl_enable: bool plugin_lo_enable: bool plugin_ltree_enable: bool plugin_old_snapshot_enable: bool + plugin_orafce_enable: bool plugin_pg_freespacemap_enable: bool + plugin_pg_similarity_enable: bool + plugin_pg_trgm_enable: bool + plugin_pg_visibility_enable: bool plugin_pgrowlocks_enable: bool plugin_pgstattuple_enable: bool - plugin_pg_visibility_enable: bool + plugin_plperl_enable: bool + plugin_plpython3u_enable: bool + plugin_pltcl_enable: bool + plugin_postgis_enable: bool + plugin_postgis_raster_enable: bool + plugin_postgis_tiger_geocoder_enable: bool + plugin_postgis_topology_enable: bool + plugin_prefix_enable: bool + plugin_rdkit_enable: bool plugin_seg_enable: bool + plugin_spi_enable: bool plugin_tablefunc_enable: bool plugin_tcn_enable: bool + plugin_tds_fdw_enable: bool + plugin_timescaledb_enable: bool plugin_tsm_system_rows_enable: bool plugin_tsm_system_time_enable: bool + plugin_unaccent_enable: bool plugin_uuid_ossp_enable: bool - plugin_spi_enable: bool - plugin_bool_plperl_enable: bool - plugin_hll_enable: bool - plugin_hypopg_enable: bool - plugin_ip4r_enable: bool - plugin_plperl_enable: bool - plugin_jsonb_plperl_enable: bool - plugin_orafce_enable: bool - plugin_pg_similarity_enable: bool - plugin_prefix_enable: bool - plugin_rdkit_enable: bool - plugin_tds_fdw_enable: bool - plugin_icu_ext_enable: bool - plugin_pltcl_enable: bool - plugin_postgis_enable: bool - plugin_address_standardizer_enable: bool - plugin_address_standardizer_data_us_enable: bool - plugin_postgis_tiger_geocoder_enable: bool - plugin_postgis_topology_enable: bool - plugin_postgis_raster_enable: bool plugin_vector_enable: bool - plugin_timescaledb_enable: bool + profile: str + profile_limit_memory: int | None + request_array_nulls: bool | None + request_backslash_quote: str | None request_date_style: str | None + request_deadlock_timeout: int | None + request_default_transaction_deferrable: bool | None + request_default_transaction_isolation: str | None + request_default_transaction_read_only: bool | None + request_escape_string_warning: bool | None + request_lock_timeout: int | None request_standard_conforming_strings: bool | None request_time_zone: str | None + request_track_activity_query_size: int | None + request_transform_null_equals: bool | None + request_xmlbinary: str | None + request_xmloption: str | None response_bytea_output: str | None + 
response_exit_on_error: bool | None + response_extra_float_digits: float | None + response_gin_fuzzy_search_limit: int | None response_lc_monetary: str | None response_lc_numeric: str | None response_lc_time: str | None + session_idle_in_transaction_session_timeout: int | None + storage_bgwriter_lru_maxpages: int | None + storage_bgwriter_lru_multiplier: float | None + storage_default_table_access_method: str | None + storage_gin_pending_list_limit: int | None + storage_old_snapshot_threshold: int | None vacuum_autovacuum_analyze_scale_factor: float | None vacuum_autovacuum_analyze_threshold: int | None vacuum_autovacuum_freeze_max_age: int | None + vacuum_autovacuum_naptime: int | None vacuum_autovacuum_vacuum_cost_delay: float | None + vacuum_autovacuum_vacuum_cost_limit: int | None + vacuum_autovacuum_vacuum_insert_scale_factor: float | None + vacuum_autovacuum_vacuum_insert_threshold: int | None vacuum_autovacuum_vacuum_scale_factor: float | None + vacuum_autovacuum_vacuum_threshold: int | None + vacuum_vacuum_cost_delay: float | None + vacuum_vacuum_cost_limit: int | None + vacuum_vacuum_cost_page_dirty: int | None + vacuum_vacuum_cost_page_hit: int | None + vacuum_vacuum_cost_page_miss: int | None + vacuum_vacuum_failsafe_age: int | None + vacuum_vacuum_freeze_min_age: int | None vacuum_vacuum_freeze_table_age: int | None - experimental_max_connections: int | None + vacuum_vacuum_multixact_failsafe_age: int | None + vacuum_vacuum_multixact_freeze_min_age: int | None + vacuum_vacuum_multixact_freeze_table_age: int | None @classmethod def keys(cls) -> list[str]: @@ -239,15 +322,6 @@ def profile_limit_memory_validator(cls, value: int) -> int | None: return value - @validator("response_bytea_output") - @classmethod - def response_bytea_output_values(cls, value: str) -> str | None: - """Check response_bytea_output config option is one of `escape` or `hex`.""" - if value not in ["escape", "hex"]: - raise ValueError("Value not one of 'escape' or 'hex'") - - return value - @validator("vacuum_autovacuum_analyze_scale_factor", "vacuum_autovacuum_vacuum_scale_factor") @classmethod def vacuum_autovacuum_vacuum_scale_factor_values(cls, value: float) -> float | None: @@ -292,3 +366,476 @@ def vacuum_vacuum_freeze_table_age_values(cls, value: int) -> int | None: raise ValueError("Value is not between 0 and 2000000000") return value + + @validator("connection_authentication_timeout") + @classmethod + def connection_authentication_timeout_values(cls, value: int) -> int | None: + """Check connection_authentication_timeout config option is between 1 and 600.""" + if value < 1 or value > 600: + raise ValueError("Value is not between 1 and 600") + + return value + + @validator("vacuum_autovacuum_naptime") + @classmethod + def vacuum_autovacuum_naptime_values(cls, value: int) -> int | None: + """Check vacuum_autovacuum_naptime config option is between 1 and 2147483.""" + if value < 1 or value > 2147483: + raise ValueError("Value is not between 1 and 2147483") + + return value + + @validator("vacuum_autovacuum_vacuum_cost_limit") + @classmethod + def vacuum_autovacuum_vacuum_cost_limit_values(cls, value: int) -> int | None: + """Check vacuum_autovacuum_vacuum_cost_limit config option is between -1 and 10000.""" + if value < -1 or value > 10000: + raise ValueError("Value is not between -1 and 10000") + + return value + + @validator("vacuum_autovacuum_vacuum_insert_scale_factor") + @classmethod + def vacuum_autovacuum_vacuum_insert_scale_factor_values(cls, value: float) -> float | None: + """Check 
vacuum_autovacuum_vacuum_insert_scale_factor config option is between 0 and 100.""" + if value < 0 or value > 100: + raise ValueError("Value is not between 0 and 100") + + return value + + @validator("vacuum_autovacuum_vacuum_insert_threshold") + @classmethod + def vacuum_autovacuum_vacuum_insert_threshold_values(cls, value: int) -> int | None: + """Check vacuum_autovacuum_vacuum_insert_threshold config option is between -1 and 2147483647.""" + if value < -1 or value > 2147483647: + raise ValueError("Value is not between -1 and 2147483647") + + return value + + @validator("vacuum_autovacuum_vacuum_threshold") + @classmethod + def vacuum_autovacuum_vacuum_threshold_values(cls, value: int) -> int | None: + """Check vacuum_autovacuum_vacuum_threshold config option is between 0 and 2147483647.""" + if value < 0 or value > 2147483647: + raise ValueError("Value is not between 0 and 2147483647") + + return value + + @validator("request_backslash_quote") + @classmethod + def request_backslash_quote_values(cls, value: str) -> str | None: + """Check request_backslash_quote config option is one of `safe_encoding`, `on` or 'off'.""" + if value not in ["safe_encoding", "on", "off"]: + raise ValueError("Value not one of `safe_encoding` or `on` or 'off'") + + return value + + @validator("storage_bgwriter_lru_maxpages") + @classmethod + def storage_bgwriter_lru_maxpages_values(cls, value: int) -> int | None: + """Check storage_bgwriter_lru_maxpages config option is between 0 and 1073741823.""" + if value < 0 or value > 1073741823: + raise ValueError("Value is not between 0 and 1073741823") + + return value + + @validator("storage_bgwriter_lru_multiplier") + @classmethod + def storage_bgwriter_lru_multiplier_values(cls, value: float) -> float | None: + """Check storage_bgwriter_lru_multiplier config option is between 0 and 10.""" + if value < 0 or value > 10: + raise ValueError("Value is not between 0 and 10") + + return value + + @validator("response_bytea_output") + @classmethod + def response_bytea_output_values(cls, value: str) -> str | None: + """Check response_bytea_output config option is one of `escape` or `hex`.""" + if value not in ["escape", "hex"]: + raise ValueError("Value not one of 'escape' or 'hex'") + + return value + + @validator("logging_client_min_messages") + @classmethod + def logging_client_min_messages_values(cls, value: str) -> str | None: + """Check logging_client_min_messages config option is one of 'debug5', 'debug4', 'debug3', 'debug2', 'debug1', 'log', 'notice', 'warning' or 'error'.""" + if value not in [ + "debug5", + "debug4", + "debug3", + "debug2", + "debug1", + "log", + "notice", + "warning", + "error", + ]: + raise ValueError( + "Value not one of 'debug5', 'debug4', 'debug3', 'debug2', 'debug1', 'log', 'notice', 'warning' or 'error'." 
+ ) + + return value + + @validator("optimizer_cpu_index_tuple_cost") + @classmethod + def optimizer_cpu_index_tuple_cost_values(cls, value: float) -> float | None: + """Check optimizer_cpu_index_tuple_cost config option is between 0 and 1.80E+308.""" + if value < 0 or value > 1.80e308: + raise ValueError("Value is not between 0 and 1.80E+308") + + return value + + @validator("optimizer_cpu_operator_cost") + @classmethod + def optimizer_cpu_operator_cost_values(cls, value: float) -> float | None: + """Check optimizer_cpu_operator_cost config option is between 0 and 1.80E+308.""" + if value < 0 or value > 1.80e308: + raise ValueError("Value is not between 0 and 1.80E+308") + + return value + + @validator("optimizer_cpu_tuple_cost") + @classmethod + def optimizer_cpu_tuple_cost_values(cls, value: float) -> float | None: + """Check optimizer_cpu_tuple_cost config option is between 0 and 1.80E+308.""" + if value < 0 or value > 1.80e308: + raise ValueError("Value is not between 0 and 1.80E+308") + + return value + + @validator("optimizer_cursor_tuple_fraction") + @classmethod + def optimizer_cursor_tuple_fraction_values(cls, value: float) -> float | None: + """Check optimizer_cursor_tuple_fraction config option is between 0 and 1.""" + if value < 0 or value > 1: + raise ValueError("Value is not between 0 and 1") + + return value + + @validator("request_deadlock_timeout") + @classmethod + def request_deadlock_timeout_values(cls, value: int) -> int | None: + """Check request_deadlock_timeout config option is between 1 and 2147483647.""" + if value < 1 or value > 2147483647: + raise ValueError("Value is not between 1 and 2147483647") + + return value + + @validator("request_default_transaction_isolation") + @classmethod + def request_default_transaction_isolation_values(cls, value: str) -> str | None: + """Check request_default_transaction_isolation config option is one of 'serializable', 'repeatable read', 'read committed', 'read uncommitted'.""" + if value not in ["serializable", "repeatable read", "read committed", "read uncommitted"]: + raise ValueError( + "Value not one of 'serializable', 'repeatable read', 'read committed', 'read uncommitted'." 
+ ) + + return value + + @validator("response_extra_float_digits") + @classmethod + def response_extra_float_digits_values(cls, value: int) -> int | None: + """Check response_extra_float_digits config option is between -15 and 3.""" + if value < -15 or value > 3: + raise ValueError("Value is not between -15 and 3") + + return value + + @validator("optimizer_geqo_effort") + @classmethod + def optimizer_geqo_effort_values(cls, value: int) -> int | None: + """Check optimizer_geqo_effort config option is between 1 and 10.""" + if value < 1 or value > 10: + raise ValueError("Value is not between 1 and 10") + + return value + + @validator("optimizer_geqo_generations") + @classmethod + def optimizer_geqo_generations_values(cls, value: int) -> int | None: + """Check optimizer_geqo_generations config option is between 0 and 2147483647.""" + if value < 0 or value > 2147483647: + raise ValueError("Value is not between 0 and 2147483647") + + return value + + @validator("optimizer_geqo_pool_size") + @classmethod + def optimizer_geqo_pool_size_values(cls, value: int) -> int | None: + """Check optimizer_geqo_pool_size config option is between 0 and 2147483647.""" + if value < 0 or value > 2147483647: + raise ValueError("Value is not between 0 and 2147483647") + + return value + + @validator("optimizer_geqo_seed") + @classmethod + def optimizer_geqo_seed_values(cls, value: float) -> float | None: + """Check optimizer_geqo_seed config option is between 0 and 1.""" + if value < 0 or value > 1: + raise ValueError("Value is not between 0 and 1") + + return value + + @validator("optimizer_geqo_selection_bias") + @classmethod + def optimizer_geqo_selection_bias_values(cls, value: float) -> float | None: + """Check optimizer_geqo_selection_bias config option is between 1.5 and 2.""" + if value < 1.5 or value > 2: + raise ValueError("Value is not between 1.5 and 2") + + return value + + @validator("optimizer_geqo_threshold") + @classmethod + def optimizer_geqo_threshold_values(cls, value: int) -> int | None: + """Check optimizer_geqo_threshold config option is between 2 and 2147483647.""" + if value < 2 or value > 2147483647: + raise ValueError("Value is not between 2 and 2147483647") + + return value + + @validator("response_gin_fuzzy_search_limit") + @classmethod + def response_gin_fuzzy_search_limit_values(cls, value: int) -> int | None: + """Check response_gin_fuzzy_search_limit config option is between 0 and 2147483647.""" + if value < 0 or value > 2147483647: + raise ValueError("Value is not between 0 and 2147483647") + + return value + + @validator("storage_gin_pending_list_limit") + @classmethod + def storage_gin_pending_list_limit_values(cls, value: int) -> int | None: + """Check storage_gin_pending_list_limit config option is between 64 and 2147483647.""" + if value < 64 or value > 2147483647: + raise ValueError("Value is not between 64 and 2147483647") + + return value + + @validator("session_idle_in_transaction_session_timeout") + @classmethod + def session_idle_in_transaction_session_timeout_values(cls, value: int) -> int | None: + """Check session_idle_in_transaction_session_timeout config option is between 0 and 2147483647.""" + if value < 0 or value > 2147483647: + raise ValueError("Value is not between 0 and 2147483647") + + return value + + @validator("optimizer_jit_above_cost") + @classmethod + def optimizer_jit_above_cost_values(cls, value: float) -> float | None: + """Check optimizer_jit_above_cost config option is between -1 and 1.80E+308.""" + if value < -1 or value > 1.80e308: + raise 
ValueError("Value is not between -1 and 1.80E+308") + + return value + + @validator("optimizer_jit_inline_above_cost") + @classmethod + def optimizer_jit_inline_above_cost_values(cls, value: float) -> float | None: + """Check optimizer_jit_inline_above_cost config option is between -1 and 1.80E+308.""" + if value < -1 or value > 1.80e308: + raise ValueError("Value is not between -1 and 1.80E+308") + + return value + + @validator("optimizer_jit_optimize_above_cost") + @classmethod + def optimizer_jit_optimize_above_cost_values(cls, value: float) -> float | None: + """Check optimizer_jit_optimize_above_cost config option is between -1 and 1.80E+308.""" + if value < -1 or value > 1.80e308: + raise ValueError("Value is not between -1 and 1.80E+308") + + return value + + @validator("request_lock_timeout") + @classmethod + def request_lock_timeout_values(cls, value: int) -> int | None: + """Check request_lock_timeout config option is between 0 and 2147483647.""" + if value < 0 or value > 2147483647: + raise ValueError("Value is not between 0 and 2147483647") + + return value + + @validator("optimizer_min_parallel_index_scan_size") + @classmethod + def optimizer_min_parallel_index_scan_size_values(cls, value: int) -> int | None: + """Check optimizer_min_parallel_index_scan_size config option is between 0 and 715827882.""" + if value < 0 or value > 715827882: + raise ValueError("Value is not between 0 and 715827882") + + return value + + @validator("optimizer_min_parallel_table_scan_size") + @classmethod + def optimizer_min_parallel_table_scan_size_values(cls, value: int) -> int | None: + """Check optimizer_min_parallel_table_scan_size config option is between 0 and 715827882.""" + if value < 0 or value > 715827882: + raise ValueError("Value is not between 0 and 715827882") + + return value + + @validator("storage_old_snapshot_threshold") + @classmethod + def storage_old_snapshot_threshold_values(cls, value: int) -> int | None: + """Check storage_old_snapshot_threshold config option is between -1 and 86400.""" + if value < -1 or value > 86400: + raise ValueError("Value is not between -1 and 86400") + + return value + + @validator("optimizer_parallel_setup_cost") + @classmethod + def optimizer_parallel_setup_cost_values(cls, value: float) -> float | None: + """Check optimizer_parallel_setup_cost config option is between 0 and 1.80E+308.""" + if value < 0 or value > 1.80e308: + raise ValueError("Value is not between 0 and 1.80E+308") + + return value + + @validator("optimizer_parallel_tuple_cost") + @classmethod + def optimizer_parallel_tuple_cost_values(cls, value: float) -> float | None: + """Check optimizer_parallel_tuple_cost config option is between 0 and 1.80E+308.""" + if value < 0 or value > 1.80e308: + raise ValueError("Value is not between 0 and 1.80E+308") + + return value + + @validator("connection_statement_timeout") + @classmethod + def connection_statement_timeout_values(cls, value: int) -> int | None: + """Check connection_statement_timeout config option is between 0 and 2147483647.""" + if value < 0 or value > 2147483647: + raise ValueError("Value is not between 0 and 2147483647") + + return value + + @validator("request_track_activity_query_size") + @classmethod + def request_track_activity_query_size_values(cls, value: int) -> int | None: + """Check request_track_activity_query_size config option is between 100 and 1048576.""" + if value < 100 or value > 1048576: + raise ValueError("Value is not between 100 and 1048576") + + return value + + @validator("logging_track_functions") + 
@classmethod + def logging_track_functions_values(cls, value: str) -> str | None: + """Check logging_track_functions config option is one of 'none', 'pl', 'all'.""" + if value not in ["none", "pl", "all"]: + raise ValueError("Value not one of 'none', 'pl', 'all'.") + + return value + + @validator("vacuum_vacuum_cost_delay") + @classmethod + def vacuum_vacuum_cost_delay_values(cls, value: float) -> float | None: + """Check vacuum_vacuum_cost_delay config option is between 0 and 100.""" + if value < 0 or value > 100: + raise ValueError("Value is not between 0 and 100") + + return value + + @validator("vacuum_vacuum_cost_limit") + @classmethod + def vacuum_vacuum_cost_limit_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_cost_limit config option is between 1 and 10000.""" + if value < 1 or value > 10000: + raise ValueError("Value is not between 1 and 10000") + + return value + + @validator("vacuum_vacuum_cost_page_dirty") + @classmethod + def vacuum_vacuum_cost_page_dirty_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_cost_page_dirty config option is between 0 and 10000.""" + if value < 0 or value > 10000: + raise ValueError("Value is not between 0 and 10000") + + return value + + @validator("vacuum_vacuum_cost_page_hit") + @classmethod + def vacuum_vacuum_cost_page_hit_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_cost_page_hit config option is between 0 and 10000.""" + if value < 0 or value > 10000: + raise ValueError("Value is not between 0 and 10000") + + return value + + @validator("vacuum_vacuum_cost_page_miss") + @classmethod + def vacuum_vacuum_cost_page_miss_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_cost_page_miss config option is between 0 and 10000.""" + if value < 0 or value > 10000: + raise ValueError("Value is not between 0 and 10000") + + return value + + @validator("vacuum_vacuum_failsafe_age") + @classmethod + def vacuum_vacuum_failsafe_age_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_failsafe_age config option is between 0 and 2100000000.""" + if value < 0 or value > 2100000000: + raise ValueError("Value is not between 0 and 2100000000") + + return value + + @validator("vacuum_vacuum_freeze_min_age") + @classmethod + def vacuum_vacuum_freeze_min_age_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_freeze_min_age config option is between 0 and 1000000000.""" + if value < 0 or value > 1000000000: + raise ValueError("Value is not between 0 and 1000000000") + + return value + + @validator("vacuum_vacuum_multixact_failsafe_age") + @classmethod + def vacuum_vacuum_multixact_failsafe_age_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_multixact_failsafe_age config option is between 0 and 2100000000.""" + if value < 0 or value > 2100000000: + raise ValueError("Value is not between 0 and 2100000000") + + return value + + @validator("vacuum_vacuum_multixact_freeze_min_age") + @classmethod + def vacuum_vacuum_multixact_freeze_min_age_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_multixact_freeze_min_age config option is between 0 and 1000000000.""" + if value < 0 or value > 1000000000: + raise ValueError("Value is not between 0 and 1000000000") + + return value + + @validator("vacuum_vacuum_multixact_freeze_table_age") + @classmethod + def vacuum_vacuum_multixact_freeze_table_age_values(cls, value: int) -> int | None: + """Check vacuum_vacuum_multixact_freeze_table_age config option is between 0 and 2000000000.""" + if value < 0 or value > 
2000000000: + raise ValueError("Value is not between 0 and 2000000000") + + return value + + @validator("request_xmlbinary") + @classmethod + def request_xmlbinary_values(cls, value: str) -> str | None: + """Check request_xmlbinary config option is 'base64' or 'hex'.""" + if value not in ["base64", "hex"]: + raise ValueError("Value not 'base64' or 'hex'.") + + return value + + @validator("request_xmloption") + @classmethod + def request_xmloption_values(cls, value: str) -> str | None: + """Check request_xmloption config option is 'content' or 'document'.""" + if value not in ["content", "document"]: + raise ValueError("Value not 'content' or 'document'.") + + return value diff --git a/src/constants.py b/src/constants.py index c5b7d60552..8fc9f92572 100644 --- a/src/constants.py +++ b/src/constants.py @@ -3,6 +3,7 @@ """File containing constants to be used in the charm.""" +DATABASE_DEFAULT_NAME = "postgres" DATABASE_PORT = "5432" PEER = "database-peers" BACKUP_USER = "backup" @@ -51,9 +52,6 @@ TRACING_PROTOCOL = "otlp_http" DATABASE = "database" -LEGACY_DB = "db" -LEGACY_DB_ADMIN = "db-admin" -ALL_LEGACY_RELATIONS = [LEGACY_DB, LEGACY_DB_ADMIN] ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE = ( "Please choose one endpoint to use. No need to relate all of them simultaneously!" diff --git a/src/dependency.json b/src/dependency.json index fbe4dc6884..1f87a03a6d 100644 --- a/src/dependency.json +++ b/src/dependency.json @@ -8,7 +8,7 @@ "rock": { "dependencies": {}, "name": "charmed-postgresql", - "upgrade_supported": "^14", - "version": "14.11" + "upgrade_supported": "^16", + "version": "16.6" } } diff --git a/src/patroni.py b/src/patroni.py index 148c77f865..7441849709 100644 --- a/src/patroni.py +++ b/src/patroni.py @@ -53,6 +53,14 @@ class SwitchoverFailedError(Exception): """Raised when a switchover failed for some reason.""" +class SwitchoverNotSyncError(SwitchoverFailedError): + """Raised when a switchover failed because node is not sync.""" + + +class UpdateSyncNodeCountError(Exception): + """Raised when updating synchronous_node_count failed for some reason.""" + + class Patroni: """This class handles the communication with Patroni API and configuration files.""" @@ -126,6 +134,49 @@ def _get_alternative_patroni_url( url = self._patroni_url return url + @property + def _synchronous_node_count(self) -> int: + planned_units = self._charm.app.planned_units() + if self._charm.config.synchronous_node_count == "all": + return planned_units - 1 + elif self._charm.config.synchronous_node_count == "majority": + return planned_units // 2 + return ( + self._charm.config.synchronous_node_count + if self._charm.config.synchronous_node_count < self._members_count - 1 + else planned_units - 1 + ) + + def update_synchronous_node_count(self) -> None: + """Update synchronous_node_count.""" + # Try to update synchronous_node_count. + for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3)): + with attempt: + r = requests.patch( + f"{self._patroni_url}/config", + json={"synchronous_node_count": self._synchronous_node_count}, + verify=self._verify, + auth=self._patroni_auth, + timeout=PATRONI_TIMEOUT, + ) + + # Check whether the update was unsuccessful. 
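+                # A non-200 reply raises inside the tenacity attempt context, so the
+                # PATCH is retried every 3 seconds for up to 60 seconds; callers see
+                # a RetryError once the deadline passes.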
+ if r.status_code != 200: + raise UpdateSyncNodeCountError(f"received {r.status_code}") + + def get_cluster( + self, attempt: AttemptManager, alternative_endpoints: list[str] | None = None + ) -> dict[str, str | int]: + """Call the cluster endpoint.""" + url = self._get_alternative_patroni_url(attempt, alternative_endpoints) + r = requests.get( + f"{url}/cluster", + verify=self._verify, + auth=self._patroni_auth, + timeout=PATRONI_TIMEOUT, + ) + return r.json() + def get_primary( self, unit_name_pattern=False, alternative_endpoints: list[str] | None = None ) -> str: @@ -142,11 +193,7 @@ def get_primary( # Request info from cluster endpoint (which returns all members of the cluster). for attempt in Retrying(stop=stop_after_attempt(len(self._endpoints) + 1)): with attempt: - url = self._get_alternative_patroni_url(attempt, alternative_endpoints) - r = requests.get( - f"{url}/cluster", verify=self._verify, timeout=5, auth=self._patroni_auth - ) - for member in r.json()["members"]: + for member in self.get_cluster(attempt, alternative_endpoints)["members"]: if member["role"] == "leader": primary = member["name"] if unit_name_pattern: @@ -171,14 +218,7 @@ def get_standby_leader( # Request info from cluster endpoint (which returns all members of the cluster). for attempt in Retrying(stop=stop_after_attempt(len(self._endpoints) + 1)): with attempt: - url = self._get_alternative_patroni_url(attempt) - r = requests.get( - f"{url}/cluster", - verify=self._verify, - auth=self._patroni_auth, - timeout=PATRONI_TIMEOUT, - ) - for member in r.json()["members"]: + for member in self.get_cluster(attempt)["members"]: if member["role"] == "standby_leader": if check_whether_is_running and member["state"] not in RUNNING_STATES: logger.warning(f"standby leader {member['name']} is not running") @@ -196,30 +236,33 @@ def get_sync_standby_names(self) -> list[str]: # Request info from cluster endpoint (which returns all members of the cluster). for attempt in Retrying(stop=stop_after_attempt(len(self._endpoints) + 1)): with attempt: - url = self._get_alternative_patroni_url(attempt) - r = requests.get( - f"{url}/cluster", - verify=self._verify, - auth=self._patroni_auth, - timeout=PATRONI_TIMEOUT, - ) - for member in r.json()["members"]: + for member in self.get_cluster(attempt)["members"]: if member["role"] == "sync_standby": sync_standbys.append("/".join(member["name"].rsplit("-", 1))) return sync_standbys @property - @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=10)) def cluster_members(self) -> set: """Get the current cluster members.""" # Request info from cluster endpoint (which returns all members of the cluster). - r = requests.get( - f"{self._patroni_url}/cluster", - verify=self._verify, - auth=self._patroni_auth, - timeout=PATRONI_TIMEOUT, - ) - return {member["name"] for member in r.json()["members"]} + for attempt in Retrying( + stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=10) + ): + with attempt: + return {member["name"] for member in self.get_cluster(attempt)["members"]} + + def get_running_cluster_members(self) -> list[str]: + """List running patroni members.""" + try: + for attempt in Retrying(stop=stop_after_attempt(1)): + with attempt: + return [ + member["name"] + for member in self.get_cluster(attempt)["members"] + if member["state"] in RUNNING_STATES + ] + except Exception: + return [] def are_all_members_ready(self) -> bool: """Check if all members are correctly running Patroni and PostgreSQL. 
@@ -233,17 +276,13 @@ def are_all_members_ready(self) -> bool: try: for attempt in Retrying(stop=stop_after_delay(10), wait=wait_fixed(3)): with attempt: - r = requests.get( - f"{self._patroni_url}/cluster", - verify=self._verify, - auth=self._patroni_auth, - timeout=PATRONI_TIMEOUT, + return all( + member["state"] in RUNNING_STATES + for member in self.get_cluster(attempt)["members"] ) except RetryError: return False - return all(member["state"] in RUNNING_STATES for member in r.json()["members"]) - @property def is_creating_backup(self) -> bool: """Returns whether a backup is being created.""" @@ -253,20 +292,13 @@ def is_creating_backup(self) -> bool: try: for attempt in Retrying(stop=stop_after_delay(10), wait=wait_fixed(3)): with attempt: - r = requests.get( - f"{self._patroni_url}/cluster", - verify=self._verify, - auth=self._patroni_auth, - timeout=PATRONI_TIMEOUT, + return any( + "tags" in member and member["tags"].get("is_creating_backup") + for member in self.get_cluster(attempt)["members"] ) except RetryError: return False - return any( - "tags" in member and member["tags"].get("is_creating_backup") - for member in r.json()["members"] - ) - @property def is_replication_healthy(self) -> bool: """Return whether the replication is healthy.""" @@ -525,7 +557,7 @@ def render_patroni_yml_file( restore_to_latest=restore_to_latest, stanza=stanza, restore_stanza=restore_stanza, - minority_count=self._members_count // 2, + synchronous_node_count=self._synchronous_node_count, version=self.rock_postgresql_version.split(".")[0], pg_parameters=parameters, primary_cluster_endpoint=self._charm.async_replication.get_primary_cluster_endpoint(), @@ -534,7 +566,7 @@ def render_patroni_yml_file( ) self._render_file(f"{self._storage_path}/patroni.yml", rendered, 0o644) - @retry(stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)) + @retry(stop=stop_after_attempt(20), wait=wait_exponential(multiplier=1, min=2, max=30)) def reload_patroni_configuration(self) -> None: """Reloads the configuration after it was updated in the file.""" requests.post( @@ -578,7 +610,7 @@ def restart_postgresql(self) -> None: timeout=PATRONI_TIMEOUT, ) - def switchover(self, candidate: str | None = None) -> None: + def switchover(self, candidate: str | None = None, wait: bool = True) -> None: """Trigger a switchover.""" # Try to trigger the switchover. if candidate is not None: @@ -597,8 +629,18 @@ def switchover(self, candidate: str | None = None) -> None: # Check whether the switchover was unsuccessful. 
if r.status_code != 200: + if ( + r.status_code == 412 + and r.text == "candidate name does not match with sync_standby" + ): + logger.debug("Unit is not sync standby") + raise SwitchoverNotSyncError() + logger.warning(f"Switchover call failed with code {r.status_code} {r.text}") raise SwitchoverFailedError(f"received {r.status_code}") + if not wait: + return + for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3), reraise=True): with attempt: new_primary = self.get_primary() diff --git a/src/relations/async_replication.py b/src/relations/async_replication.py index 838b1bba20..1700de12b8 100644 --- a/src/relations/async_replication.py +++ b/src/relations/async_replication.py @@ -104,9 +104,6 @@ def __init__(self, charm): self.framework.observe( self.charm.on.create_replication_action, self._on_create_replication ) - self.framework.observe( - self.charm.on.promote_to_primary_action, self._on_promote_to_primary - ) self.framework.observe(self.charm.on.secret_changed, self._on_secret_changed) @@ -467,7 +464,7 @@ def is_primary_cluster(self) -> bool: return self.charm.app == self.get_primary_cluster() def _on_async_relation_broken(self, _) -> None: - if self.charm._peers is None or "departing" in self.charm._peers.data[self.charm.unit]: + if self.charm._peers is None or self.charm.is_unit_departing: logger.debug("Early exit on_async_relation_broken: Skipping departing unit.") return @@ -509,11 +506,11 @@ def _on_async_relation_changed(self, event: RelationChangedEvent) -> None: if not self._stop_database(event): return - if not all( + if not (self.charm.is_unit_stopped or self._is_following_promoted_cluster()) or not all( "stopped" in self.charm._peers.data[unit] or self.charm._peers.data[unit].get("unit-promoted-cluster-counter") == self._get_highest_promoted_cluster_counter_value() - for unit in {*self.charm._peers.units, self.charm.unit} + for unit in self.charm._peers.units ): self.charm.unit.status = WaitingStatus( "Waiting for the database to be stopped in all units" @@ -575,7 +572,7 @@ def _on_create_replication(self, event: ActionEvent) -> None: # Set the status. self.charm.unit.status = MaintenanceStatus("Creating replication...") - def _on_promote_to_primary(self, event: ActionEvent) -> None: + def promote_to_primary(self, event: ActionEvent) -> None: """Promote this cluster to the primary cluster.""" if ( self.charm.app.status.message != READ_ONLY_MODE_BLOCKING_MESSAGE @@ -692,10 +689,7 @@ def _set_app_status(self) -> None: def _stop_database(self, event: RelationChangedEvent) -> bool: """Stop the database.""" - if ( - "stopped" not in self.charm._peers.data[self.charm.unit] - and not self._is_following_promoted_cluster() - ): + if not self.charm.is_unit_stopped and not self._is_following_promoted_cluster(): if not self.charm.unit.is_leader() and not self.container.exists(POSTGRESQL_DATA_PATH): logger.debug("Early exit on_async_relation_changed: following promoted cluster.") return False diff --git a/src/relations/db.py b/src/relations/db.py deleted file mode 100644 index dee16696aa..0000000000 --- a/src/relations/db.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. 
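One note on the `switchover()` changes above before the rest of the deleted legacy module below: the method now accepts `wait=False` to skip polling for the new primary, and surfaces Patroni's 412 response as `SwitchoverNotSyncError`. A hedged sketch of how a caller could react to that error; the classes here only mimic the exceptions in `src/patroni.py`, and `SYNC_STANDBYS`/`failover_to` are illustrative names, not charm API:

```python
class SwitchoverFailedError(Exception):
    """Local stand-in for the charm's generic switchover failure."""


class SwitchoverNotSyncError(SwitchoverFailedError):
    """Local stand-in for the new 412 case: candidate is not a sync standby."""


SYNC_STANDBYS = ["postgresql-k8s-1"]  # illustrative cluster state


def switchover(candidate: str, wait: bool = True) -> None:
    """Stand-in for Patroni.switchover(); the real method POSTs to /switchover."""
    if candidate not in SYNC_STANDBYS:
        raise SwitchoverNotSyncError("candidate name does not match with sync_standby")
    print(f"switching over to {candidate} (wait={wait})")


def failover_to(candidate: str) -> None:
    # If the preferred unit is rejected, retry against a known sync standby.
    try:
        switchover(candidate, wait=False)
    except SwitchoverNotSyncError:
        switchover(SYNC_STANDBYS[0], wait=False)


failover_to("postgresql-k8s-2")  # falls back to postgresql-k8s-1
```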
- -"""Postgres db and db-admin relation hooks & helpers.""" - -import logging -from typing import Iterable - -from charms.postgresql_k8s.v0.postgresql import ( - PostgreSQLCreateDatabaseError, - PostgreSQLCreateUserError, - PostgreSQLDeleteUserError, - PostgreSQLGetPostgreSQLVersionError, -) -from ops.charm import ( - CharmBase, - RelationBrokenEvent, - RelationChangedEvent, - RelationDepartedEvent, -) -from ops.framework import Object -from ops.model import ActiveStatus, BlockedStatus, Relation, Unit -from pgconnstr import ConnectionString - -from constants import ( - ALL_LEGACY_RELATIONS, - DATABASE_PORT, - ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE, -) -from utils import new_password - -logger = logging.getLogger(__name__) - -EXTENSIONS_BLOCKING_MESSAGE = "extensions requested through relation" - -ROLES_BLOCKING_MESSAGE = ( - "roles requested through relation, use postgresql_client interface instead" -) - - -class DbProvides(Object): - """Defines functionality for the 'provides' side of the 'db' relation. - - Hook events observed: - - relation-changed - - relation-departed - - relation-broken - """ - - def __init__(self, charm: CharmBase, admin: bool = False): - """Constructor for DbProvides object. - - Args: - charm: the charm for which this relation is provided - admin: a boolean defining whether or not this relation has admin permissions, switching - between "db" and "db-admin" relations. - """ - if admin: - self.relation_name = "db-admin" - else: - self.relation_name = "db" - - super().__init__(charm, self.relation_name) - - self.framework.observe( - charm.on[self.relation_name].relation_changed, self._on_relation_changed - ) - self.framework.observe( - charm.on[self.relation_name].relation_departed, self._on_relation_departed - ) - self.framework.observe( - charm.on[self.relation_name].relation_broken, self._on_relation_broken - ) - - self.admin = admin - self.charm = charm - - def _on_relation_changed(self, event: RelationChangedEvent) -> None: - """Handle the legacy db/db-admin relation changed event. - - Generate password and handle user and database creation for the related application. - """ - # Check for some conditions before trying to access the PostgreSQL instance. 
- if ( - "cluster_initialised" not in self.charm._peers.data[self.charm.app] - or not self.charm._patroni.member_started - ): - logger.debug( - "Deferring on_relation_changed: Cluster not initialized or patroni not running" - ) - event.defer() - return - - if not self.charm.unit.is_leader(): - return - - if self._check_multiple_endpoints(): - self.charm.unit.status = BlockedStatus(ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE) - return - - logger.warning(f"DEPRECATION WARNING - `{self.relation_name}` is a legacy interface") - - if ( - not self.set_up_relation(event.relation) - and self.charm.unit.status.message - == f"Failed to initialize {self.relation_name} relation" - ): - event.defer() - return - - def _check_exist_current_relation(self) -> bool: - return any(r in ALL_LEGACY_RELATIONS for r in self.charm.client_relations) - - def _check_multiple_endpoints(self) -> bool: - """Checks if there are relations with other endpoints.""" - is_exist = self._check_exist_current_relation() - for relation in self.charm.client_relations: - if relation.name not in ALL_LEGACY_RELATIONS and is_exist: - return True - return False - - def _get_extensions(self, relation: Relation) -> tuple[list, set]: - """Returns the list of required and disabled extensions.""" - requested_extensions = relation.data.get(relation.app, {}).get("extensions", "").split(",") - for unit in relation.units: - requested_extensions.extend( - relation.data.get(unit, {}).get("extensions", "").split(",") - ) - required_extensions = [] - for extension in requested_extensions: - if extension != "" and extension not in required_extensions: - required_extensions.append(extension) - disabled_extensions = set() - if required_extensions: - for extension in required_extensions: - extension_name = extension.split(":")[0] - if not self.charm.model.config.get(f"plugin_{extension_name}_enable"): - disabled_extensions.add(extension_name) - return required_extensions, disabled_extensions - - def _get_roles(self, relation: Relation) -> bool: - """Checks if relation required roles.""" - return "roles" in relation.data.get(relation.app, {}) - - def set_up_relation(self, relation: Relation) -> bool: - """Set up the relation to be used by the application charm.""" - # Do not allow apps requesting extensions to be installed - # (let them now about config options). - required_extensions, disabled_extensions = self._get_extensions(relation) - if disabled_extensions: - logger.error( - f"ERROR - `extensions` ({', '.join(disabled_extensions)}) cannot be requested through relations" - " - Please enable extensions through `juju config` and add the relation again." - ) - self.charm.unit.status = BlockedStatus(EXTENSIONS_BLOCKING_MESSAGE) - return False - - if self._get_roles(relation): - self.charm.unit.status = BlockedStatus(ROLES_BLOCKING_MESSAGE) - return False - - if not (database := relation.data.get(relation.app, {}).get("database")): - for unit in relation.units: - if database := relation.data.get(unit, {}).get("database"): - break - - if not database: - logger.warning("Early exit on_relation_changed: No database name provided") - return False - - try: - unit_relation_databag = relation.data[self.charm.unit] - application_relation_databag = relation.data[self.charm.app] - - # Creates the user and the database for this specific relation if it was not already - # created in a previous relation changed event. 
- user = f"relation_id_{relation.id}" - password = unit_relation_databag.get("password", new_password()) - self.charm.postgresql.create_user(user, password, self.admin) - plugins = self.charm.get_plugins() - - self.charm.postgresql.create_database( - database, user, plugins=plugins, client_relations=self.charm.client_relations - ) - - # Build the primary's connection string. - primary = str( - ConnectionString( - host=self.charm.primary_endpoint, - dbname=database, - port=DATABASE_PORT, - user=user, - password=password, - fallback_application_name=relation.app.name, - ) - ) - - # Build the standbys' connection string. - standbys = str( - ConnectionString( - host=self.charm.replicas_endpoint, - dbname=database, - port=DATABASE_PORT, - user=user, - password=password, - fallback_application_name=relation.app.name, - ) - ) - - postgresql_version = None - try: - postgresql_version = self.charm.postgresql.get_postgresql_version() - except PostgreSQLGetPostgreSQLVersionError: - logger.exception( - f"Failed to retrieve the PostgreSQL version to initialise/update {self.relation_name} relation" - ) - - # Set the data in both application and unit data bag. - # It's needed to run this logic on every relation changed event - # setting the data again in the databag, otherwise the application charm that - # is connecting to this database will receive a "database gone" event from the - # old PostgreSQL library (ops-lib-pgsql) and the connection between the - # application and this charm will not work. - updates = { - "allowed-subnets": self._get_allowed_subnets(relation), - "allowed-units": self._get_allowed_units(relation), - "host": self.charm.endpoint, - "master": primary, - "port": DATABASE_PORT, - "standbys": standbys, - "user": user, - "password": password, - "database": database, - "extensions": ",".join(required_extensions), - } - if postgresql_version: - updates["version"] = postgresql_version - application_relation_databag.update(updates) - unit_relation_databag.update(updates) - except ( - PostgreSQLCreateDatabaseError, - PostgreSQLCreateUserError, - ): - self.charm.unit.status = BlockedStatus( - f"Failed to initialize {self.relation_name} relation" - ) - return False - - self._update_unit_status(relation) - - return True - - def _check_for_blocking_relations(self, relation_id: int) -> bool: - """Checks if there are relations with extensions or roles. - - Args: - relation_id: current relation to be skipped - """ - for relname in ["db", "db-admin"]: - for relation in self.charm.model.relations.get(relname, []): - if relation.id == relation_id: - continue - for data in relation.data.values(): - if "extensions" in data or "roles" in data: - return True - return False - - def _on_relation_departed(self, event: RelationDepartedEvent) -> None: - """Handle the departure of legacy db and db-admin relations. - - Remove unit name from allowed_units key. - """ - # Check for some conditions before trying to access the PostgreSQL instance. - if ( - "cluster_initialised" not in self.charm._peers.data[self.charm.app] - or not self.charm._patroni.member_started - ): - logger.debug( - "Deferring on_relation_departed: Cluster not initialized or patroni not running" - ) - event.defer() - return - - # Set a flag to avoid deleting database users when this unit - # is removed and receives relation broken events from related applications. - # This is needed because of https://bugs.launchpad.net/juju/+bug/1979811. 
- if event.departing_unit == self.charm.unit: - self.charm._peers.data[self.charm.unit].update({"departing": "True"}) - return - - if not self.charm.unit.is_leader(): - return - - if event.departing_unit.app == self.charm.app: - # Just run for departing of remote units. - return - - departing_unit = event.departing_unit.name - local_unit_data = event.relation.data[self.charm.unit] - local_app_data = event.relation.data[self.charm.app] - - current_allowed_units = local_unit_data.get("allowed_units", "") - - logger.debug(f"Removing unit {departing_unit} from allowed_units") - local_app_data["allowed_units"] = local_unit_data["allowed_units"] = " ".join({ - unit for unit in current_allowed_units.split() if unit != departing_unit - }) - - def _on_relation_broken(self, event: RelationBrokenEvent) -> None: - """Remove the user created for this relation.""" - # Check for some conditions before trying to access the PostgreSQL instance. - if ( - "cluster_initialised" not in self.charm._peers.data[self.charm.app] - or not self.charm._patroni.member_started - ): - logger.debug( - "Deferring on_relation_broken: Cluster not initialized or patroni not running" - ) - event.defer() - return - - if "departing" in self.charm._peers.data[self.charm.unit]: - logger.debug("Early exit on_relation_broken: Skipping departing unit") - return - - if not self.charm.unit.is_leader(): - return - - # Delete the user. - user = f"relation_id_{event.relation.id}" - try: - self.charm.postgresql.delete_user(user) - except PostgreSQLDeleteUserError: - self.charm.unit.status = BlockedStatus( - f"Failed to delete user during {self.relation_name} relation broken event" - ) - - self._update_unit_status(event.relation) - - def _update_unit_status(self, relation: Relation) -> None: - """# Clean up Blocked status if it's due to extensions request.""" - if ( - self.charm._has_blocked_status - and self.charm.unit.status.message - in [ - EXTENSIONS_BLOCKING_MESSAGE, - ROLES_BLOCKING_MESSAGE, - ] - and not self._check_for_blocking_relations(relation.id) - ): - self.charm.unit.status = ActiveStatus() - - self._update_unit_status_on_blocking_endpoint_simultaneously() - - def _update_unit_status_on_blocking_endpoint_simultaneously(self): - """Clean up Blocked status if this is due related of multiple endpoints.""" - if ( - self.charm._has_blocked_status - and self.charm.unit.status.message == ENDPOINT_SIMULTANEOUSLY_BLOCKING_MESSAGE - and not self._check_multiple_endpoints() - ): - self.charm.unit.status = ActiveStatus() - - def _check_multiple_endpoints(self) -> bool: - """Checks if there are relations with other endpoints.""" - relation_names = {relation.name for relation in self.charm.client_relations} - return "database" in relation_names and len(relation_names) > 1 - - def _get_allowed_subnets(self, relation: Relation) -> str: - """Build the list of allowed subnets as in the legacy charm.""" - - def _csplit(s) -> Iterable[str]: - if s: - for b in s.split(","): - b = b.strip() - if b: - yield b - - subnets = set() - for unit, relation_data in relation.data.items(): - if isinstance(unit, Unit) and not unit.name.startswith(self.model.app.name): - # Egress-subnets is not always available. 
- subnets.update(set(_csplit(relation_data.get("egress-subnets", "")))) - return ",".join(sorted(subnets)) - - def _get_allowed_units(self, relation: Relation) -> str: - """Build the list of allowed units as in the legacy charm.""" - return ",".join( - sorted( - unit.name - for unit in relation.data - if isinstance(unit, Unit) and not unit.name.startswith(self.model.app.name) - ) - ) - - def _get_state(self) -> str: - """Gets the given state for this unit. - - Returns: - The state of this unit. Can be 'standalone', 'master', or 'standby'. - """ - if len(self.charm._peers.units) == 0: - return "standalone" - if self.charm._patroni.get_primary(unit_name_pattern=True) == self.charm.unit.name: - return "master" - else: - return "standby" diff --git a/src/relations/postgresql_provider.py b/src/relations/postgresql_provider.py index 8301b067f7..fb370e52d4 100644 --- a/src/relations/postgresql_provider.py +++ b/src/relations/postgresql_provider.py @@ -65,16 +65,21 @@ def __init__(self, charm: CharmBase, relation_name: str = "database") -> None: self.database_provides.on.database_requested, self._on_database_requested ) + @staticmethod + def _sanitize_extra_roles(extra_roles: str | None) -> list[str]: + """Standardize and sanitize user extra-roles.""" + if extra_roles is None: + return [] + + return [role.lower() for role in extra_roles.split(",")] + def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: """Handle the legacy postgresql-client relation changed event. Generate password and handle user and database creation for the related application. """ # Check for some conditions before trying to access the PostgreSQL instance. - if ( - "cluster_initialised" not in self.charm._peers.data[self.charm.app] - or not self.charm._patroni.member_started - ): + if not self.charm.is_cluster_initialised or not self.charm._patroni.member_started: logger.debug( "Deferring on_database_requested: Cluster must be initialized before database can be requested" ) @@ -83,7 +88,9 @@ def _on_database_requested(self, event: DatabaseRequestedEvent) -> None: # Retrieve the database name and extra user roles using the charm library. database = event.database - extra_user_roles = event.extra_user_roles + + # Make sure that certain groups are not in the list + extra_user_roles = self._sanitize_extra_roles(event.extra_user_roles) try: # Creates the user and the database for this specific relation. @@ -159,7 +166,7 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None: # Check for some conditions before trying to access the PostgreSQL instance. 
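(The `_on_relation_broken` hunk continues below.) The new `_sanitize_extra_roles()` static method above normalizes the comma-separated `extra-user-roles` field in one place instead of at each call site. Its behaviour in isolation, copied from the hunk above:

```python
def _sanitize_extra_roles(extra_roles: str | None) -> list[str]:
    """Standardize and sanitize user extra-roles (copy of the helper above)."""
    if extra_roles is None:
        return []
    return [role.lower() for role in extra_roles.split(",")]


assert _sanitize_extra_roles(None) == []
assert _sanitize_extra_roles("SUPERUSER,admin") == ["superuser", "admin"]
```

Note that it lower-cases but does not strip whitespace, so `"a, b"` yields `["a", " b"]`; callers are expected to pass an unpadded list.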
if ( not self.charm._peers - or "cluster_initialised" not in self.charm._peers.data[self.charm.app] + or not self.charm.is_cluster_initialised or not self.charm._patroni.member_started ): logger.debug( @@ -170,7 +177,7 @@ def _on_relation_broken(self, event: RelationBrokenEvent) -> None: self._update_unit_status(event.relation) - if "departing" in self.charm._peers.data[self.charm.unit]: + if self.charm.is_unit_departing: logger.debug("Early exit on_relation_broken: Skipping departing unit") return @@ -271,9 +278,7 @@ def check_for_invalid_extra_user_roles(self, relation_id: int) -> bool: continue for data in relation.data.values(): extra_user_roles = data.get("extra-user-roles") - if extra_user_roles is None: - continue - extra_user_roles = extra_user_roles.lower().split(",") + extra_user_roles = self._sanitize_extra_roles(extra_user_roles) for extra_user_role in extra_user_roles: if ( extra_user_role not in valid_privileges diff --git a/src/upgrade.py b/src/upgrade.py index 5e0068944d..92dc307dc5 100644 --- a/src/upgrade.py +++ b/src/upgrade.py @@ -152,6 +152,7 @@ def _on_upgrade_changed(self, event) -> None: return self.charm.update_config() + self.charm.updated_synchronous_node_count() def _on_upgrade_charm_check_legacy(self, event: UpgradeCharmEvent) -> None: if not self.peer_relation: diff --git a/templates/patroni.yml.j2 b/templates/patroni.yml.j2 index 0921fcfda5..9c19745546 100644 --- a/templates/patroni.yml.j2 +++ b/templates/patroni.yml.j2 @@ -2,7 +2,7 @@ bootstrap: dcs: synchronous_mode: true failsafe_mode: true - synchronous_node_count: {{ minority_count }} + synchronous_node_count: {{ synchronous_node_count }} postgresql: use_pg_rewind: true remove_data_directory_on_rewind_failure: true @@ -156,11 +156,26 @@ postgresql: authentication: replication: password: {{ replication_password }} + {%- if enable_tls %} + sslrootcert: {{ conf_path }}/ca.pem + sslcert: {{ conf_path }}/cert.pem + sslkey: {{ conf_path }}/key.pem + {%- endif %} rewind: username: {{ rewind_user }} password: {{ rewind_password }} + {%- if enable_tls %} + sslrootcert: {{ conf_path }}/ca.pem + sslcert: {{ conf_path }}/cert.pem + sslkey: {{ conf_path }}/key.pem + {%- endif %} superuser: password: {{ superuser_password }} + {%- if enable_tls %} + sslrootcert: {{ conf_path }}/ca.pem + sslcert: {{ conf_path }}/cert.pem + sslkey: {{ conf_path }}/key.pem + {%- endif %} use_endpoints: true use_unix_socket: true {%- if is_no_sync_member or is_creating_backup %} diff --git a/terraform/variables.tf b/terraform/variables.tf index f69bd70d37..5a841c32b5 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -12,7 +12,7 @@ variable "app_name" { variable "channel" { description = "Charm channel to use when deploying" type = string - default = "14/stable" + default = "16/stable" } variable "revision" { @@ -24,7 +24,7 @@ variable "revision" { variable "base" { description = "Application base" type = string - default = "ubuntu@22.04" + default = "ubuntu@24.04" } variable "units" { diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index abacfd3269..57c3e4b87f 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -1,12 +1,96 @@ -#!/usr/bin/env python3 # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. +import logging +import os +import uuid + +import boto3 import pytest from pytest_operator.plugin import OpsTest +from . 
import architecture +from .helpers import construct_endpoint + +AWS = "AWS" +GCP = "GCP" + +logger = logging.getLogger(__name__) + + +@pytest.fixture(scope="session") +def charm(): + # Return str instead of pathlib.Path since python-libjuju's model.deploy(), juju deploy, and + # juju bundle files expect local charms to begin with `./` or `/` to distinguish them from + # Charmhub charms. + return f"./postgresql-k8s_ubuntu@24.04-{architecture.architecture}.charm" + + +def get_cloud_config(cloud: str) -> tuple[dict[str, str], dict[str, str]]: + # Define some configurations and credentials. + if cloud == AWS: + return { + "endpoint": "https://s3.amazonaws.com", + "bucket": "data-charms-testing", + "path": f"/postgresql-k8s/{uuid.uuid1()}", + "region": "us-east-1", + }, { + "access-key": os.environ["AWS_ACCESS_KEY"], + "secret-key": os.environ["AWS_SECRET_KEY"], + } + elif cloud == GCP: + return { + "endpoint": "https://storage.googleapis.com", + "bucket": "data-charms-testing", + "path": f"/postgresql-k8s/{uuid.uuid1()}", + "region": "", + }, { + "access-key": os.environ["GCP_ACCESS_KEY"], + "secret-key": os.environ["GCP_SECRET_KEY"], + } + + +def cleanup_cloud(config: dict[str, str], credentials: dict[str, str]) -> None: + # Delete the previously created objects. + logger.info("deleting the previously created backups") + session = boto3.session.Session( + aws_access_key_id=credentials["access-key"], + aws_secret_access_key=credentials["secret-key"], + region_name=config["region"], + ) + s3 = session.resource( + "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"]) + ) + bucket = s3.Bucket(config["bucket"]) + # GCS doesn't support batch delete operation, so delete the objects one by one. + for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")): + bucket_object.delete() + + +@pytest.fixture(scope="module") +async def aws_cloud_configs(ops_test: OpsTest): + if ( + not os.environ.get("AWS_ACCESS_KEY", "").strip() + or not os.environ.get("AWS_SECRET_KEY", "").strip() + ): + pytest.skip("AWS configs not set") + return + + config, credentials = get_cloud_config(AWS) + yield config, credentials + + cleanup_cloud(config, credentials) + @pytest.fixture(scope="module") -async def database_charm(ops_test: OpsTest): - """Build the database charm.""" - charm = await ops_test.build_charm(".") - return charm +async def gcp_cloud_configs(ops_test: OpsTest): + if ( + not os.environ.get("GCP_ACCESS_KEY", "").strip() + or not os.environ.get("GCP_SECRET_KEY", "").strip() + ): + pytest.skip("GCP configs not set") + return + + config, credentials = get_cloud_config(GCP) + yield config, credentials + + cleanup_cloud(config, credentials) diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index a67bde151a..1cefb20bf0 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -501,6 +501,26 @@ async def get_postgresql_parameter(ops_test: OpsTest, parameter_name: str) -> in return parameter_value +async def get_leader(model: Model, application_name: str) -> str: + """Get the leader name. + + Args: + model: the model instance. + application_name: the name of the application to get the value for. + + Returns: + the name of the leader.
+ """ + status = await model.get_status() + first_unit_ip = next( + unit for unit in status["applications"][application_name]["units"].values() + )["address"] + cluster = get_patroni_cluster(first_unit_ip) + for member in cluster["members"]: + if member["role"] == "leader": + return member["name"] + + async def get_standby_leader(model: Model, application_name: str) -> str: """Get the standby leader name. @@ -550,8 +570,8 @@ async def inject_dependency_fault( ops_test, application_name, "upgrade", "dependencies" ) loaded_dependency_dict = json.loads(dependencies) - loaded_dependency_dict["charm"]["upgrade_supported"] = "^15" - loaded_dependency_dict["charm"]["version"] = "15.0" + loaded_dependency_dict["charm"]["upgrade_supported"] = "^25" + loaded_dependency_dict["charm"]["version"] = "25.0" # Overwrite dependency.json with incompatible version. with zipfile.ZipFile(charm_file, mode="a") as charm_zip: @@ -923,6 +943,14 @@ async def stop_continuous_writes(ops_test: OpsTest) -> int: return int(action.results["writes"]) +async def clear_continuous_writes(ops_test: OpsTest) -> None: + """Clears continuous writes to PostgreSQL.""" + action = await ops_test.model.units.get(f"{APPLICATION_NAME}/0").run_action( + "clear-continuous-writes" + ) + action = await action.wait() + + async def get_storage_id(ops_test: OpsTest, unit_name: str) -> str: """Retrieves storage id associated with provided unit. @@ -1145,3 +1173,24 @@ async def remove_unit_force(ops_test: OpsTest, num_units: int): timeout=1000, wait_for_exact_units=scale, ) + + +async def get_cluster_roles( + ops_test: OpsTest, unit_name: str +) -> dict[str, str | list[str] | None]: + """Returns whether the unit a replica in the cluster.""" + unit_ip = await get_unit_address(ops_test, unit_name) + members = {"replicas": [], "primaries": [], "sync_standbys": []} + member_list = get_patroni_cluster(unit_ip)["members"] + logger.info(f"Cluster members are: {member_list}") + for member in member_list: + role = member["role"] + name = "/".join(member["name"].rsplit("-", 1)) + if role == "leader": + members["primaries"].append(name) + elif role == "sync_standby": + members["sync_standbys"].append(name) + else: + members["replicas"].append(name) + + return members diff --git a/tests/integration/ha_tests/scripts/deploy_chaos_mesh.sh b/tests/integration/ha_tests/scripts/deploy_chaos_mesh.sh index 72dd22078b..9aaacc7019 100755 --- a/tests/integration/ha_tests/scripts/deploy_chaos_mesh.sh +++ b/tests/integration/ha_tests/scripts/deploy_chaos_mesh.sh @@ -12,18 +12,17 @@ fi deploy_chaos_mesh() { echo "adding chaos-mesh helm repo" - sg snap_microk8s -c "microk8s.helm3 repo add chaos-mesh https://charts.chaos-mesh.org" + microk8s.helm3 repo add chaos-mesh https://charts.chaos-mesh.org echo "installing chaos-mesh" - sg snap_microk8s -c "microk8s.helm3 install chaos-mesh chaos-mesh/chaos-mesh \ - --namespace=\"${chaos_mesh_ns}\" \ + microk8s.helm3 install chaos-mesh chaos-mesh/chaos-mesh \ + --namespace="${chaos_mesh_ns}" \ --set chaosDaemon.runtime=containerd \ --set chaosDaemon.socketPath=/var/snap/microk8s/common/run/containerd.sock \ --set dashboard.create=false \ - --version \"${chaos_mesh_version}\" \ + --version "${chaos_mesh_version}" \ --set clusterScoped=false \ - --set controllerManager.targetNamespace=\"${chaos_mesh_ns}\" \ - " + --set controllerManager.targetNamespace="${chaos_mesh_ns}" sleep 10 } diff --git a/tests/integration/ha_tests/scripts/destroy_chaos_mesh.sh b/tests/integration/ha_tests/scripts/destroy_chaos_mesh.sh index 
19a81b35a9..4e23b08276 100755 --- a/tests/integration/ha_tests/scripts/destroy_chaos_mesh.sh +++ b/tests/integration/ha_tests/scripts/destroy_chaos_mesh.sh @@ -45,9 +45,9 @@ destroy_chaos_mesh() { timeout 30 kubectl delete crd "${args[@]}" || true fi - if [ -n "${chaos_mesh_ns}" ] && sg snap_microk8s -c "microk8s.helm3 repo list --namespace=${chaos_mesh_ns}" | grep -q 'chaos-mesh'; then + if [ -n "${chaos_mesh_ns}" ] && microk8s.helm3 repo list --namespace="${chaos_mesh_ns}" | grep -q 'chaos-mesh'; then echo "uninstalling chaos-mesh helm repo" - sg snap_microk8s -c "microk8s.helm3 uninstall chaos-mesh --namespace=\"${chaos_mesh_ns}\"" || true + microk8s.helm3 uninstall chaos-mesh --namespace="${chaos_mesh_ns}" || true fi } diff --git a/tests/integration/ha_tests/test_async_replication.py b/tests/integration/ha_tests/test_async_replication.py index df04ee61fb..664b921b00 100644 --- a/tests/integration/ha_tests/test_async_replication.py +++ b/tests/integration/ha_tests/test_async_replication.py @@ -6,7 +6,6 @@ import subprocess from asyncio import gather -import psycopg2 import pytest as pytest from juju.model import Model from lightkube import Client @@ -14,24 +13,22 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_delay, wait_fixed -from .. import architecture, markers +from .. import architecture from ..helpers import ( APPLICATION_NAME, CHARM_BASE, DATABASE_APP_NAME, build_and_deploy, get_leader_unit, - get_password, - get_primary, - get_unit_address, scale_application, wait_for_relation_removed_between, ) from .helpers import ( are_writes_increasing, check_writes, + clear_continuous_writes, + get_leader, get_standby_leader, - get_sync_standby, start_continuous_writes, ) @@ -42,6 +39,7 @@ FAST_INTERVAL = "10s" IDLE_PERIOD = 5 TIMEOUT = 2000 +FIRST_DATABASE_RELATION_NAME = "database" @contextlib.asynccontextmanager @@ -100,15 +98,13 @@ async def second_model_continuous_writes(second_model) -> None: assert action.results["result"] == "True", "Unable to clear up continuous_writes table" -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_deploy_async_replication_setup( - ops_test: OpsTest, first_model: Model, second_model: Model + ops_test: OpsTest, charm, first_model: Model, second_model: Model ) -> None: """Build and deploy two PostgreSQL cluster in two separate models to test async replication.""" - await build_and_deploy(ops_test, CLUSTER_SIZE, wait_for_idle=False) - await build_and_deploy(ops_test, CLUSTER_SIZE, wait_for_idle=False, model=second_model) + await build_and_deploy(ops_test, charm, CLUSTER_SIZE, wait_for_idle=False) + await build_and_deploy(ops_test, charm, CLUSTER_SIZE, wait_for_idle=False, model=second_model) await ops_test.model.deploy( APPLICATION_NAME, channel="latest/edge", num_units=1, base=CHARM_BASE ) @@ -133,8 +129,6 @@ async def test_deploy_async_replication_setup( ) -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_async_replication( ops_test: OpsTest, @@ -211,8 +205,6 @@ async def test_async_replication( await check_writes(ops_test, extra_model=second_model) -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_switchover( ops_test: OpsTest, @@ -243,7 +235,7 @@ async def test_switchover( leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME, model=second_model) assert leader_unit is not None, "No leader unit found" logger.info("promoting the second cluster") - run_action = await leader_unit.run_action("promote-to-primary", 
**{"force": True}) + run_action = await leader_unit.run_action("promote-to-primary", force=True, scope="cluster") await run_action.wait() assert (run_action.results.get("return-code", None) == 0) or ( run_action.results.get("Code", None) == "0" @@ -266,8 +258,6 @@ async def test_switchover( await are_writes_increasing(ops_test, extra_model=second_model) -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_promote_standby( ops_test: OpsTest, @@ -299,7 +289,7 @@ async def test_promote_standby( leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) assert leader_unit is not None, "No leader unit found" logger.info("promoting the first cluster") - run_action = await leader_unit.run_action("promote-to-primary") + run_action = await leader_unit.run_action("promote-to-primary", scope="cluster") await run_action.wait() assert (run_action.results.get("return-code", None) == 0) or ( run_action.results.get("Code", None) == "0" @@ -315,24 +305,22 @@ async def test_promote_standby( ), ) + logger.info("rerelate test app") + await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( + "database", f"{APPLICATION_NAME}:{FIRST_DATABASE_RELATION_NAME}" + ) + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active", raise_on_blocked=True + ) + await ops_test.model.relate( + DATABASE_APP_NAME, f"{APPLICATION_NAME}:{FIRST_DATABASE_RELATION_NAME}" + ) + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active", raise_on_blocked=True + ) + logger.info("removing the previous data") - primary = await get_primary(ops_test) - address = await get_unit_address(ops_test, primary) - password = await get_password(ops_test) - database_name = f"{APPLICATION_NAME.replace('-', '_')}_database" - connection = None - try: - connection = psycopg2.connect( - f"dbname={database_name} user=operator password={password} host={address}" - ) - connection.autocommit = True - cursor = connection.cursor() - cursor.execute("DROP TABLE IF EXISTS continuous_writes;") - except psycopg2.Error as e: - assert False, f"Failed to drop continuous writes table: {e}" - finally: - if connection is not None: - connection.close() + await clear_continuous_writes(ops_test) logger.info("starting continuous writes to the database") await start_continuous_writes(ops_test, DATABASE_APP_NAME) @@ -341,8 +329,6 @@ async def test_promote_standby( await are_writes_increasing(ops_test) -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_reestablish_relation( ops_test: OpsTest, first_model: Model, second_model: Model, continuous_writes @@ -399,8 +385,6 @@ async def test_reestablish_relation( await check_writes(ops_test, extra_model=second_model) -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_async_replication_failover_in_main_cluster( ops_test: OpsTest, first_model: Model, second_model: Model, continuous_writes @@ -412,11 +396,11 @@ async def test_async_replication_failover_in_main_cluster( logger.info("checking whether writes are increasing") await are_writes_increasing(ops_test) - sync_standby = await get_sync_standby(first_model, DATABASE_APP_NAME) - logger.info(f"Sync-standby: {sync_standby}") - logger.info("deleting the sync-standby pod") + primary = await get_leader(first_model, DATABASE_APP_NAME) + logger.info(f"Primary: {primary}") + logger.info("deleting the primary pod") client = Client(namespace=first_model.info.name) - client.delete(Pod, 
name=sync_standby.replace("/", "-")) + client.delete(Pod, name=primary.replace("/", "-")) async with ops_test.fast_forward(FAST_INTERVAL), fast_forward(second_model, FAST_INTERVAL): await gather( @@ -429,9 +413,9 @@ ) # Check that the sync-standby unit is not the same as before. - new_sync_standby = await get_sync_standby(first_model, DATABASE_APP_NAME) - logger.info(f"New sync-standby: {new_sync_standby}") - assert new_sync_standby != sync_standby, "Sync-standby is the same as before" + new_primary = await get_leader(first_model, DATABASE_APP_NAME) + logger.info(f"New primary: {new_primary}") + assert new_primary != primary, "Primary is the same as before" logger.info("Ensure continuous_writes after the crashed unit") await are_writes_increasing(ops_test) @@ -442,8 +426,6 @@ await check_writes(ops_test, extra_model=second_model) -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_async_replication_failover_in_secondary_cluster( ops_test: OpsTest, first_model: Model, second_model: Model, continuous_writes @@ -480,8 +462,6 @@ await check_writes(ops_test, extra_model=second_model) -@pytest.mark.group(1) -@markers.juju3 @pytest.mark.abort_on_fail async def test_scaling( ops_test: OpsTest, first_model: Model, second_model: Model, continuous_writes diff --git a/tests/integration/ha_tests/test_replication.py b/tests/integration/ha_tests/test_replication.py index fcc88cd1d8..68741c56d3 100644 --- a/tests/integration/ha_tests/test_replication.py +++ b/tests/integration/ha_tests/test_replication.py @@ -26,16 +26,15 @@ ) -@pytest.mark.group(1) @pytest.mark.abort_on_fail -async def test_build_and_deploy(ops_test: OpsTest) -> None: +async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: """Build and deploy three unit of PostgreSQL.""" wait_for_apps = False # It is possible for users to provide their own cluster for HA testing. Hence, check if there # is a pre-existing cluster. if not await app_name(ops_test): wait_for_apps = True - await build_and_deploy(ops_test, 3, wait_for_idle=False) + await build_and_deploy(ops_test, charm, 3, wait_for_idle=False) # Deploy the continuous writes application charm if it wasn't already deployed. if not await app_name(ops_test, APPLICATION_NAME): wait_for_apps = True @@ -52,7 +51,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False) -@pytest.mark.group(1) async def test_reelection(ops_test: OpsTest, continuous_writes, primary_start_timeout) -> None: """Kill primary unit, check reelection.""" app = await app_name(ops_test) @@ -84,7 +82,6 @@ await is_cluster_updated(ops_test, primary_name) -@pytest.mark.group(1) async def test_consistency(ops_test: OpsTest, continuous_writes) -> None: """Write to primary, read data from secondaries (check consistency).""" # Locate primary unit.
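The failover test above now crashes the cluster leader (rather than the sync standby) by deleting its pod directly through lightkube. The same call in isolation; the unit and namespace names are illustrative, and the `replace("/", "-")` mapping from Juju unit name to pod name mirrors the diff above:

```python
from lightkube import Client
from lightkube.resources.core_v1 import Pod

# Juju unit "postgresql-k8s/0" runs in pod "postgresql-k8s-0" of the model's namespace.
unit_name = "postgresql-k8s/0"
client = Client(namespace="test-model")  # namespace: the Juju model name (illustrative)
client.delete(Pod, name=unit_name.replace("/", "-"))
```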
@@ -102,8 +99,9 @@ async def test_consistency(ops_test: OpsTest, continuous_writes) -> None: await check_writes(ops_test) -@pytest.mark.group(1) -async def test_no_data_replicated_between_clusters(ops_test: OpsTest, continuous_writes) -> None: +async def test_no_data_replicated_between_clusters( + ops_test: OpsTest, charm, continuous_writes +) -> None: """Check that writes in one cluster are not replicated to another cluster.""" # Locate primary unit. app = await app_name(ops_test) @@ -111,7 +109,7 @@ async def test_no_data_replicated_between_clusters(ops_test: OpsTest, continuous # Deploy another cluster. new_cluster_app = f"second-{app}" - await build_and_deploy(ops_test, 2, database_app_name=new_cluster_app) + await build_and_deploy(ops_test, charm, 2, database_app_name=new_cluster_app) # Start an application that continuously writes data to the database. await start_continuous_writes(ops_test, app) diff --git a/tests/integration/ha_tests/test_restart.py b/tests/integration/ha_tests/test_restart.py index e64f4a1833..26a93da597 100644 --- a/tests/integration/ha_tests/test_restart.py +++ b/tests/integration/ha_tests/test_restart.py @@ -31,11 +31,10 @@ CLUSTER_SIZE = 3 -@pytest.mark.group(1) @pytest.mark.abort_on_fail -async def test_deploy(ops_test: OpsTest) -> None: +async def test_deploy(ops_test: OpsTest, charm) -> None: """Build and deploy a PostgreSQL cluster and a test application.""" - await build_and_deploy(ops_test, CLUSTER_SIZE, wait_for_idle=False) + await build_and_deploy(ops_test, charm, CLUSTER_SIZE, wait_for_idle=False) if not await app_name(ops_test, APPLICATION_NAME): await ops_test.model.deploy(APPLICATION_NAME, num_units=1) @@ -48,7 +47,6 @@ async def test_deploy(ops_test: OpsTest) -> None: ) -@pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_restart(ops_test: OpsTest, continuous_writes) -> None: """Test restart of all the units simultaneously.""" diff --git a/tests/integration/ha_tests/test_rollback_to_master_label.py b/tests/integration/ha_tests/test_rollback_to_master_label.py deleted file mode 100644 index 7e6639b0ad..0000000000 --- a/tests/integration/ha_tests/test_rollback_to_master_label.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -import asyncio -import logging -import operator -import shutil -from pathlib import Path - -import pytest -from pytest_operator.plugin import OpsTest -from tenacity import Retrying, stop_after_attempt, wait_fixed - -from .. 
import markers -from ..architecture import architecture -from ..helpers import ( - APPLICATION_NAME, - CHARM_BASE, - DATABASE_APP_NAME, - METADATA, - get_leader_unit, - get_primary, - get_unit_by_index, -) -from .helpers import ( - are_writes_increasing, - check_writes, - get_instances_roles, - inject_dependency_fault, - start_continuous_writes, -) - -logger = logging.getLogger(__name__) - -TIMEOUT = 600 -LABEL_REVISION = 280 if architecture == "arm64" else 281 - - -@pytest.mark.group(1) -@markers.juju3 -@pytest.mark.unstable -@markers.amd64_only # TODO: remove after arm64 stable release -@pytest.mark.abort_on_fail -async def test_deploy_stable(ops_test: OpsTest) -> None: - """Simple test to ensure that the PostgreSQL and application charms get deployed.""" - await asyncio.gather( - ops_test.model.deploy( - DATABASE_APP_NAME, - num_units=3, - channel="14/stable", - revision=LABEL_REVISION, - base=CHARM_BASE, - trust=True, - ), - ops_test.model.deploy( - APPLICATION_NAME, - num_units=1, - channel="latest/edge", - base=CHARM_BASE, - ), - ) - logger.info("Wait for applications to become active") - async with ops_test.fast_forward(): - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active", raise_on_error=False - ) - assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3 - instances_roles = await get_instances_roles(ops_test) - assert operator.countOf(instances_roles.values(), "master") == 1 - assert operator.countOf(instances_roles.values(), "primary") == 0 - assert operator.countOf(instances_roles.values(), "replica") == 2 - - -@pytest.mark.group(1) -@markers.juju3 -@pytest.mark.unstable -@markers.amd64_only # TODO: remove after arm64 stable release -async def test_fail_and_rollback(ops_test, continuous_writes) -> None: - # Start an application that continuously writes data to the database. - logger.info("starting continuous writes to the database") - await start_continuous_writes(ops_test, DATABASE_APP_NAME) - - # Check whether writes are increasing. - logger.info("checking whether writes are increasing") - await are_writes_increasing(ops_test) - - logger.info("Get leader unit") - leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) - assert leader_unit is not None, "No leader unit found" - - for attempt in Retrying(stop=stop_after_attempt(2), wait=wait_fixed(30), reraise=True): - with attempt: - logger.info("Run pre-upgrade-check action") - action = await leader_unit.run_action("pre-upgrade-check") - await action.wait() - - # Ensure the primary has changed to the first unit. - primary_name = await get_primary(ops_test, DATABASE_APP_NAME) - assert primary_name == f"{DATABASE_APP_NAME}/0" - - local_charm = await ops_test.build_charm(".") - filename = local_charm.split("/")[-1] if isinstance(local_charm, str) else local_charm.name - fault_charm = Path("/tmp/", filename) - shutil.copy(local_charm, fault_charm) - - logger.info("Inject dependency fault") - await inject_dependency_fault(ops_test, DATABASE_APP_NAME, fault_charm) - - application = ops_test.model.applications[DATABASE_APP_NAME] - - resources = {"postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"]} - application = ops_test.model.applications[DATABASE_APP_NAME] - - logger.info("Refresh the charm") - await application.refresh(path=fault_charm, resources=resources) - - logger.info("Get first upgrading unit") - # Highest ordinal unit always the first to upgrade. 
- unit = get_unit_by_index(DATABASE_APP_NAME, application.units, 2) - - logger.info("Wait for upgrade to fail on first upgrading unit") - async with ops_test.fast_forward("60s"): - await ops_test.model.block_until( - lambda: unit.workload_status == "blocked", - timeout=TIMEOUT, - ) - instances_roles = await get_instances_roles(ops_test) - assert operator.countOf(instances_roles.values(), "master") == 1 - assert operator.countOf(instances_roles.values(), "primary") == 0 - assert operator.countOf(instances_roles.values(), "replica") == 2 - - logger.info("Ensure continuous_writes while in failure state on remaining units") - await are_writes_increasing(ops_test) - - logger.info("Re-run pre-upgrade-check action") - action = await leader_unit.run_action("pre-upgrade-check") - await action.wait() - - logger.info("Re-refresh the charm") - await ops_test.juju( - "download", - "postgresql-k8s", - "--revision", - str(LABEL_REVISION), - "--filepath", - f"/tmp/postgresql-k8s_r{LABEL_REVISION}.charm", - ) - await ops_test.juju( - "refresh", - DATABASE_APP_NAME, - "--path", - f"/tmp/postgresql-k8s_r{LABEL_REVISION}.charm", - "--resource", - "postgresql-image=ghcr.io/canonical/charmed-postgresql@sha256:76ef26c7d11a524bcac206d5cb042ebc3c8c8ead73fa0cd69d21921552db03b6", - ) - - async with ops_test.fast_forward("60s"): - await ops_test.model.block_until( - lambda: unit.workload_status_message == "upgrade completed", timeout=TIMEOUT - ) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME], idle_period=30, timeout=TIMEOUT - ) - - # Check whether writes are increasing. - logger.info("checking whether writes are increasing") - await are_writes_increasing(ops_test) - - instances_roles = await get_instances_roles(ops_test) - assert operator.countOf(instances_roles.values(), "master") == 1 - assert operator.countOf(instances_roles.values(), "primary") == 0 - assert operator.countOf(instances_roles.values(), "replica") == 2 - - logger.info("Resume upgrade") - action = await leader_unit.run_action("resume-upgrade") - await action.wait() - - logger.info("Wait for application to recover") - async with ops_test.fast_forward("60s"): - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT - ) - - instances_roles = await get_instances_roles(ops_test) - assert operator.countOf(instances_roles.values(), "master") == 1 - assert operator.countOf(instances_roles.values(), "primary") == 0 - assert operator.countOf(instances_roles.values(), "replica") == 2 - - logger.info("Ensure continuous_writes after rollback procedure") - await are_writes_increasing(ops_test) - - # Verify that no writes to the database were missed after stopping the writes - # (check that all the units have all the writes). - logger.info("Checking whether no writes were lost") - await check_writes(ops_test) - - # Remove fault charm file. - fault_charm.unlink() diff --git a/tests/integration/ha_tests/test_self_healing.py b/tests/integration/ha_tests/test_self_healing_1.py similarity index 87% rename from tests/integration/ha_tests/test_self_healing.py rename to tests/integration/ha_tests/test_self_healing_1.py index 1afd64239c..f972303380 100644 --- a/tests/integration/ha_tests/test_self_healing.py +++ b/tests/integration/ha_tests/test_self_healing_1.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. 
import asyncio @@ -20,7 +19,6 @@ get_password, get_unit_address, run_command_on_unit, - scale_application, ) from .helpers import ( are_all_db_processes_down, @@ -52,16 +50,15 @@ MEDIAN_ELECTION_TIME = 10 -@pytest.mark.group("ha_tests") @pytest.mark.abort_on_fail -async def test_build_and_deploy(ops_test: OpsTest) -> None: +async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: """Build and deploy three unit of PostgreSQL.""" wait_for_apps = False # It is possible for users to provide their own cluster for HA testing. Hence, check if there # is a pre-existing cluster. if not await app_name(ops_test): wait_for_apps = True - await build_and_deploy(ops_test, 3, wait_for_idle=False) + await build_and_deploy(ops_test, charm, 3, wait_for_idle=False) # Deploy the continuous writes application charm if it wasn't already deployed. if not await app_name(ops_test, APPLICATION_NAME): wait_for_apps = True @@ -78,7 +75,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False) -@pytest.mark.group("ha_tests") @pytest.mark.abort_on_fail @pytest.mark.parametrize("process", DB_PROCESSES) @pytest.mark.parametrize("signal", ["SIGTERM", "SIGKILL"]) @@ -119,7 +115,6 @@ async def test_interruption_db_process( await is_cluster_updated(ops_test, primary_name) -@pytest.mark.group("ha_tests") @pytest.mark.abort_on_fail @pytest.mark.parametrize("process", DB_PROCESSES) async def test_freeze_db_process( @@ -163,7 +158,6 @@ async def test_freeze_db_process( await is_cluster_updated(ops_test, primary_name) -@pytest.mark.group("ha_tests") @pytest.mark.abort_on_fail @pytest.mark.parametrize("process", DB_PROCESSES) @pytest.mark.parametrize("signal", ["SIGTERM", "SIGKILL"]) @@ -229,7 +223,6 @@ async def test_full_cluster_restart( await check_writes(ops_test) -@pytest.mark.group("ha_tests") @pytest.mark.abort_on_fail async def test_forceful_restart_without_data_and_transaction_logs( ops_test: OpsTest, @@ -317,7 +310,6 @@ async def test_forceful_restart_without_data_and_transaction_logs( await is_cluster_updated(ops_test, primary_name) -@pytest.mark.group("ha_tests") @pytest.mark.abort_on_fail @markers.amd64_only async def test_network_cut( @@ -381,48 +373,3 @@ async def test_network_cut( ) await is_cluster_updated(ops_test, primary_name) - - -@pytest.mark.group("scaling_to_zero") -@pytest.mark.abort_on_fail -async def test_scaling_to_zero(ops_test: OpsTest, continuous_writes) -> None: - """Scale the database to zero units and scale up again.""" - # Deploy applications - await test_build_and_deploy(ops_test) - - # Locate primary unit. - app = await app_name(ops_test) - - # Start an application that continuously writes data to the database. - await start_continuous_writes(ops_test, app) - - # Scale the database to zero units. - logger.info("scaling database to zero units") - await scale_application(ops_test, app, 0) - - # Scale the database to three units. - logger.info("scaling database to three units") - await scale_application(ops_test, app, 3) - - # Verify all units are up and running. - logger.info("waiting for the database service to start in all units") - for unit in ops_test.model.applications[app].units: - assert await is_postgresql_ready(ops_test, unit.name), ( - f"unit {unit.name} not restarted after cluster restart." - ) - - logger.info("checking whether writes are increasing") - await are_writes_increasing(ops_test) - - # Verify that all units are part of the same cluster. 
- logger.info("checking whether all units are part of the same cluster") - member_ips = await fetch_cluster_members(ops_test) - ip_addresses = [ - await get_unit_address(ops_test, unit.name) - for unit in ops_test.model.applications[app].units - ] - assert set(member_ips) == set(ip_addresses), "not all units are part of the same cluster." - - # Verify that no writes to the database were missed after stopping the writes. - logger.info("checking whether no writes to the database were missed after stopping the writes") - await check_writes(ops_test) diff --git a/tests/integration/ha_tests/test_self_healing_2.py b/tests/integration/ha_tests/test_self_healing_2.py new file mode 100644 index 0000000000..43b7d6a062 --- /dev/null +++ b/tests/integration/ha_tests/test_self_healing_2.py @@ -0,0 +1,97 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +import logging + +import pytest +from pytest_operator.plugin import OpsTest + +from ..helpers import ( + APPLICATION_NAME, + CHARM_BASE, + METADATA, + app_name, + build_and_deploy, + get_unit_address, + scale_application, +) +from .helpers import ( + are_writes_increasing, + check_writes, + fetch_cluster_members, + is_postgresql_ready, + start_continuous_writes, +) + +logger = logging.getLogger(__name__) + +APP_NAME = METADATA["name"] +PATRONI_PROCESS = "/usr/bin/patroni" +POSTGRESQL_PROCESS = "postgres" +DB_PROCESSES = [POSTGRESQL_PROCESS, PATRONI_PROCESS] +MEDIAN_ELECTION_TIME = 10 + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: + """Build and deploy three unit of PostgreSQL.""" + wait_for_apps = False + # It is possible for users to provide their own cluster for HA testing. Hence, check if there + # is a pre-existing cluster. + if not await app_name(ops_test): + wait_for_apps = True + await build_and_deploy(ops_test, charm, 3, wait_for_idle=False) + # Deploy the continuous writes application charm if it wasn't already deployed. + if not await app_name(ops_test, APPLICATION_NAME): + wait_for_apps = True + async with ops_test.fast_forward(): + await ops_test.model.deploy( + APPLICATION_NAME, + application_name=APPLICATION_NAME, + base=CHARM_BASE, + channel="edge", + ) + + if wait_for_apps: + async with ops_test.fast_forward(): + await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False) + + +@pytest.mark.abort_on_fail +async def test_scaling_to_zero(ops_test: OpsTest, continuous_writes) -> None: + """Scale the database to zero units and scale up again.""" + # Locate primary unit. + app = await app_name(ops_test) + + # Start an application that continuously writes data to the database. + await start_continuous_writes(ops_test, app) + + # Scale the database to zero units. + logger.info("scaling database to zero units") + await scale_application(ops_test, app, 0) + + # Scale the database to three units. + logger.info("scaling database to three units") + await scale_application(ops_test, app, 3) + + # Verify all units are up and running. + logger.info("waiting for the database service to start in all units") + for unit in ops_test.model.applications[app].units: + assert await is_postgresql_ready(ops_test, unit.name), ( + f"unit {unit.name} not restarted after cluster restart." + ) + + logger.info("checking whether writes are increasing") + await are_writes_increasing(ops_test) + + # Verify that all units are part of the same cluster. 
+ logger.info("checking whether all units are part of the same cluster") + member_ips = await fetch_cluster_members(ops_test) + ip_addresses = [ + await get_unit_address(ops_test, unit.name) + for unit in ops_test.model.applications[app].units + ] + assert set(member_ips) == set(ip_addresses), "not all units are part of the same cluster." + + # Verify that no writes to the database were missed after stopping the writes. + logger.info("checking whether no writes to the database were missed after stopping the writes") + await check_writes(ops_test) diff --git a/tests/integration/ha_tests/test_smoke.py b/tests/integration/ha_tests/test_smoke.py index 2509bcc521..032349b66e 100644 --- a/tests/integration/ha_tests/test_smoke.py +++ b/tests/integration/ha_tests/test_smoke.py @@ -10,10 +10,11 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_delay, wait_fixed -from .. import markers from ..helpers import ( CHARM_BASE, DATABASE_APP_NAME, + METADATA, + build_and_deploy, scale_application, ) from .helpers import ( @@ -43,25 +44,13 @@ env["KUBECONFIG"] = os.path.expanduser("~/.kube/config") -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release @pytest.mark.abort_on_fail -async def test_app_force_removal(ops_test: OpsTest): +async def test_app_force_removal(ops_test: OpsTest, charm): """Remove unit with force while storage is alive.""" global primary_pv, primary_pvc # Deploy the charm. async with ops_test.fast_forward(): - await ops_test.model.deploy( - DATABASE_APP_NAME, - application_name=DATABASE_APP_NAME, - num_units=1, - channel="14/stable", - base=CHARM_BASE, - trust=True, - config={"profile": "testing"}, - ) - - await ops_test.model.wait_for_idle(status="active", timeout=1000) + await build_and_deploy(ops_test, charm, 1) assert ops_test.model.applications[DATABASE_APP_NAME].units[0].workload_status == "active" @@ -104,8 +93,6 @@ async def test_app_force_removal(ops_test: OpsTest): assert await is_storage_exists(ops_test, storage_id) -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release @pytest.mark.abort_on_fail async def test_app_garbage_ignorance(ops_test: OpsTest): """Test charm deploy in dirty environment with garbage storage.""" @@ -157,18 +144,19 @@ async def test_app_garbage_ignorance(ops_test: OpsTest): delete_pvc(ops_test, primary_pvc) -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release @pytest.mark.abort_on_fail -async def test_app_resources_conflicts(ops_test: OpsTest): +async def test_app_resources_conflicts(ops_test: OpsTest, charm): """Test application deploy in dirty environment with garbage storage from another application.""" global primary_pv, primary_pvc async with ops_test.fast_forward(): + resources = { + "postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"], + } await ops_test.model.deploy( - DATABASE_APP_NAME, + charm, + resources=resources, application_name=DUP_DATABASE_APP_NAME, num_units=1, - channel="14/stable", base=CHARM_BASE, trust=True, config={"profile": "testing"}, diff --git a/tests/integration/ha_tests/test_synchronous_policy.py b/tests/integration/ha_tests/test_synchronous_policy.py new file mode 100644 index 0000000000..4214a4ae11 --- /dev/null +++ b/tests/integration/ha_tests/test_synchronous_policy.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+import pytest
+from pytest_operator.plugin import OpsTest
+from tenacity import Retrying, stop_after_attempt, wait_fixed
+
+from ..helpers import app_name, build_and_deploy
+from .helpers import get_cluster_roles
+
+
+@pytest.mark.abort_on_fail
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
+    """Build and deploy three units of PostgreSQL."""
+    wait_for_apps = False
+    # It is possible for users to provide their own cluster for HA testing. Hence, check if there
+    # is a pre-existing cluster.
+    if not await app_name(ops_test):
+        wait_for_apps = True
+        await build_and_deploy(ops_test, charm, 3, wait_for_idle=False)
+
+    if wait_for_apps:
+        async with ops_test.fast_forward():
+            await ops_test.model.wait_for_idle(status="active", timeout=1000, raise_on_error=False)
+
+
+async def test_default_all(ops_test: OpsTest) -> None:
+    app = await app_name(ops_test)
+
+    async with ops_test.fast_forward():
+        await ops_test.model.wait_for_idle(apps=[app], status="active", timeout=300)
+
+    for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(5), reraise=True):
+        with attempt:
+            roles = await get_cluster_roles(
+                ops_test, ops_test.model.applications[app].units[0].name
+            )
+
+            assert len(roles["primaries"]) == 1
+            assert len(roles["sync_standbys"]) == 2
+            assert len(roles["replicas"]) == 0
+
+
+async def test_majority(ops_test: OpsTest) -> None:
+    app = await app_name(ops_test)
+
+    await ops_test.model.applications[app].set_config({"synchronous_node_count": "majority"})
+
+    async with ops_test.fast_forward():
+        await ops_test.model.wait_for_idle(apps=[app], status="active")
+
+    for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(5), reraise=True):
+        with attempt:
+            roles = await get_cluster_roles(
+                ops_test, ops_test.model.applications[app].units[0].name
+            )
+
+            assert len(roles["primaries"]) == 1
+            assert len(roles["sync_standbys"]) == 1
+            assert len(roles["replicas"]) == 1
+
+
+async def test_constant(ops_test: OpsTest) -> None:
+    app = await app_name(ops_test)
+
+    await ops_test.model.applications[app].set_config({"synchronous_node_count": "2"})
+
+    async with ops_test.fast_forward():
+        await ops_test.model.wait_for_idle(apps=[app], status="active", timeout=300)
+
+    for attempt in Retrying(stop=stop_after_attempt(3), wait=wait_fixed(5), reraise=True):
+        with attempt:
+            roles = await get_cluster_roles(
+                ops_test, ops_test.model.applications[app].units[0].name
+            )
+
+            assert len(roles["primaries"]) == 1
+            assert len(roles["sync_standbys"]) == 2
+            assert len(roles["replicas"]) == 0
diff --git a/tests/integration/ha_tests/test_upgrade.py b/tests/integration/ha_tests/test_upgrade.py
index 9c19c60f26..92e02c85e7 100644
--- a/tests/integration/ha_tests/test_upgrade.py
+++ b/tests/integration/ha_tests/test_upgrade.py
@@ -12,9 +12,11 @@ from pytest_operator.plugin import OpsTest
 from tenacity import Retrying, stop_after_attempt, wait_fixed
 
+from .. 
import markers from ..helpers import ( APPLICATION_NAME, CHARM_BASE, + CHARM_BASE_NOBLE, DATABASE_APP_NAME, METADATA, count_switchovers, @@ -34,7 +36,8 @@ TIMEOUT = 600 -@pytest.mark.group(1) +# No arm edge +@markers.amd64_only @pytest.mark.abort_on_fail async def test_deploy_latest(ops_test: OpsTest) -> None: """Simple test to ensure that the PostgreSQL and application charms get deployed.""" @@ -42,10 +45,10 @@ async def test_deploy_latest(ops_test: OpsTest) -> None: ops_test.model.deploy( DATABASE_APP_NAME, num_units=3, - channel="14/edge", + channel="16/edge", trust=True, config={"profile": "testing"}, - base=CHARM_BASE, + base=CHARM_BASE_NOBLE, ), ops_test.model.deploy( APPLICATION_NAME, @@ -65,7 +68,8 @@ async def test_deploy_latest(ops_test: OpsTest) -> None: assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3 -@pytest.mark.group(1) +# No arm edge +@markers.amd64_only @pytest.mark.abort_on_fail async def test_pre_upgrade_check(ops_test: OpsTest) -> None: """Test that the pre-upgrade-check action runs successfully.""" @@ -92,9 +96,10 @@ async def test_pre_upgrade_check(ops_test: OpsTest) -> None: assert stateful_set.spec.updateStrategy.rollingUpdate.partition == 2, "Partition not set to 2" -@pytest.mark.group(1) +# No arm edge +@markers.amd64_only @pytest.mark.abort_on_fail -async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes) -> None: +async def test_upgrade_from_edge(ops_test: OpsTest, charm, continuous_writes) -> None: # Start an application that continuously writes data to the database. logger.info("starting continuous writes to the database") await start_continuous_writes(ops_test, DATABASE_APP_NAME) @@ -109,9 +114,6 @@ async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes) -> None: resources = {"postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"]} application = ops_test.model.applications[DATABASE_APP_NAME] - logger.info("Build charm locally") - charm = await ops_test.build_charm(".") - logger.info("Refresh the charm") await application.refresh(path=charm, resources=resources) @@ -158,9 +160,10 @@ async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes) -> None: ) -@pytest.mark.group(1) +# No arm edge +@markers.amd64_only @pytest.mark.abort_on_fail -async def test_fail_and_rollback(ops_test, continuous_writes) -> None: +async def test_fail_and_rollback(ops_test, charm, continuous_writes) -> None: # Start an application that continuously writes data to the database. 
logger.info("starting continuous writes to the database") await start_continuous_writes(ops_test, DATABASE_APP_NAME) @@ -183,10 +186,9 @@ async def test_fail_and_rollback(ops_test, continuous_writes) -> None: primary_name = await get_primary(ops_test, DATABASE_APP_NAME) assert primary_name == f"{DATABASE_APP_NAME}/0" - local_charm = await ops_test.build_charm(".") - filename = local_charm.split("/")[-1] if isinstance(local_charm, str) else local_charm.name + filename = Path(charm).name fault_charm = Path("/tmp/", filename) - shutil.copy(local_charm, fault_charm) + shutil.copy(charm, fault_charm) logger.info("Inject dependency fault") await inject_dependency_fault(ops_test, DATABASE_APP_NAME, fault_charm) @@ -215,7 +217,7 @@ async def test_fail_and_rollback(ops_test, continuous_writes) -> None: await action.wait() logger.info("Re-refresh the charm") - await application.refresh(path=local_charm) + await application.refresh(path=charm) async with ops_test.fast_forward("60s"): await ops_test.model.block_until( diff --git a/tests/integration/ha_tests/test_upgrade_from_stable.py b/tests/integration/ha_tests/test_upgrade_from_stable.py index ac221930e1..84155d7044 100644 --- a/tests/integration/ha_tests/test_upgrade_from_stable.py +++ b/tests/integration/ha_tests/test_upgrade_from_stable.py @@ -10,7 +10,6 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_fixed -from .. import markers from ..helpers import ( APPLICATION_NAME, CHARM_BASE, @@ -32,16 +31,16 @@ TIMEOUT = 10 * 60 -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release @pytest.mark.abort_on_fail async def test_deploy_stable(ops_test: OpsTest) -> None: """Simple test to ensure that the PostgreSQL and application charms get deployed.""" + # TODO remove once we release to stable + pytest.skip("No 16/stable yet.") await asyncio.gather( ops_test.model.deploy( DATABASE_APP_NAME, num_units=3, - channel="14/stable", + channel="16/stable", trust=True, base=CHARM_BASE, ), @@ -60,11 +59,11 @@ async def test_deploy_stable(ops_test: OpsTest) -> None: assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3 -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release @pytest.mark.abort_on_fail async def test_pre_upgrade_check(ops_test: OpsTest) -> None: """Test that the pre-upgrade-check action runs successfully.""" + # TODO remove once we release to stable + pytest.skip("No 16/stable yet.") application = ops_test.model.applications[DATABASE_APP_NAME] if "pre-upgrade-check" not in await application.get_actions(): logger.info("skipping the test because the charm from 14/stable doesn't support upgrade") @@ -93,11 +92,11 @@ async def test_pre_upgrade_check(ops_test: OpsTest) -> None: assert stateful_set.spec.updateStrategy.rollingUpdate.partition == 2, "Partition not set to 2" -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release @pytest.mark.abort_on_fail -async def test_upgrade_from_stable(ops_test: OpsTest, continuous_writes): +async def test_upgrade_from_stable(ops_test: OpsTest, charm): """Test updating from stable channel.""" + # TODO remove once we release to stable + pytest.skip("No 16/stable yet.") # Start an application that continuously writes data to the database. 
logger.info("starting continuous writes to the database") await start_continuous_writes(ops_test, DATABASE_APP_NAME) @@ -113,9 +112,6 @@ async def test_upgrade_from_stable(ops_test: OpsTest, continuous_writes): application = ops_test.model.applications[DATABASE_APP_NAME] actions = await application.get_actions() - logger.info("Build charm locally") - charm = await ops_test.build_charm(".") - logger.info("Refresh the charm") await application.refresh(path=charm, resources=resources) diff --git a/tests/integration/ha_tests/test_upgrade_to_primary_label.py b/tests/integration/ha_tests/test_upgrade_to_primary_label.py deleted file mode 100644 index 0d76c08f2b..0000000000 --- a/tests/integration/ha_tests/test_upgrade_to_primary_label.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -import asyncio -import logging -import operator - -import pytest -from pytest_operator.plugin import OpsTest -from tenacity import Retrying, stop_after_attempt, wait_fixed - -from .. import markers -from ..architecture import architecture -from ..helpers import ( - APPLICATION_NAME, - CHARM_BASE, - CHARM_SERIES, - DATABASE_APP_NAME, - METADATA, - get_leader_unit, - get_primary, - get_unit_by_index, -) -from ..juju_ import juju_major_version -from .helpers import ( - are_writes_increasing, - check_writes, - get_instances_roles, - start_continuous_writes, -) - -logger = logging.getLogger(__name__) - -TIMEOUT = 600 - - -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release -@pytest.mark.unstable -@pytest.mark.abort_on_fail -async def test_deploy_stable(ops_test: OpsTest) -> None: - """Simple test to ensure that the PostgreSQL and application charms get deployed.""" - database_additional_params = {} - if juju_major_version >= 3: - database_additional_params["base"] = CHARM_BASE - else: - database_additional_params["series"] = CHARM_SERIES - - await asyncio.gather( - ops_test.model.deploy( - DATABASE_APP_NAME, - num_units=3, - channel="14/stable", - revision=(280 if architecture == "arm64" else 281), - trust=True, - **database_additional_params, - ), - ops_test.model.deploy( - APPLICATION_NAME, - num_units=1, - channel="latest/edge", - base=CHARM_BASE, - ), - ) - logger.info("Wait for applications to become active") - async with ops_test.fast_forward(): - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active", raise_on_error=False - ) - assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3 - instances_roles = await get_instances_roles(ops_test) - assert operator.countOf(instances_roles.values(), "master") == 1 - assert operator.countOf(instances_roles.values(), "primary") == 0 - assert operator.countOf(instances_roles.values(), "replica") == 2 - - -@pytest.mark.group(1) -@markers.amd64_only # TODO: remove after arm64 stable release -@pytest.mark.unstable -async def test_upgrade(ops_test, continuous_writes) -> None: - # Start an application that continuously writes data to the database. - logger.info("starting continuous writes to the database") - await start_continuous_writes(ops_test, DATABASE_APP_NAME) - - # Check whether writes are increasing. 
- logger.info("checking whether writes are increasing") - await are_writes_increasing(ops_test) - - logger.info("Get leader unit") - leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) - assert leader_unit is not None, "No leader unit found" - - for attempt in Retrying(stop=stop_after_attempt(2), wait=wait_fixed(30), reraise=True): - with attempt: - logger.info("Run pre-upgrade-check action") - action = await leader_unit.run_action("pre-upgrade-check") - await action.wait() - - # Ensure the primary has changed to the first unit. - primary_name = await get_primary(ops_test, DATABASE_APP_NAME) - assert primary_name == f"{DATABASE_APP_NAME}/0" - - local_charm = await ops_test.build_charm(".") - application = ops_test.model.applications[DATABASE_APP_NAME] - - resources = {"postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"]} - application = ops_test.model.applications[DATABASE_APP_NAME] - - logger.info("Refresh the charm") - await application.refresh(path=local_charm, resources=resources) - - logger.info("Get first upgrading unit") - # Highest ordinal unit always the first to upgrade. - unit = get_unit_by_index(DATABASE_APP_NAME, application.units, 2) - - async with ops_test.fast_forward("60s"): - await ops_test.model.block_until( - lambda: unit.workload_status_message == "upgrade completed", timeout=TIMEOUT - ) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME], idle_period=30, timeout=TIMEOUT - ) - - # Check whether writes are increasing. - logger.info("checking whether writes are increasing") - await are_writes_increasing(ops_test) - - instances_roles = await get_instances_roles(ops_test) - assert operator.countOf(instances_roles.values(), "master") == 1 - assert operator.countOf(instances_roles.values(), "primary") == 0 - assert operator.countOf(instances_roles.values(), "replica") == 2 - - logger.info("Resume upgrade") - action = await leader_unit.run_action("resume-upgrade") - await action.wait() - - logger.info("Wait for application to upgrade") - async with ops_test.fast_forward("60s"): - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME], status="active", timeout=TIMEOUT - ) - - instances_roles = await get_instances_roles(ops_test) - assert operator.countOf(instances_roles.values(), "master") == 0 - assert operator.countOf(instances_roles.values(), "primary") == 1 - assert operator.countOf(instances_roles.values(), "replica") == 2 - - logger.info("Ensure continuous_writes after upgrade") - await are_writes_increasing(ops_test) - - # Verify that no writes to the database were missed after stopping the writes - # (check that all the units have all the writes). 
- logger.info("Checking whether no writes were lost") - await check_writes(ops_test) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 9fb6a2160f..2f65351ed7 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -32,8 +32,10 @@ wait_fixed, ) +from constants import DATABASE_DEFAULT_NAME + CHARM_BASE = "ubuntu@22.04" -CHARM_SERIES = "jammy" +CHARM_BASE_NOBLE = "ubuntu@24.04" METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) DATABASE_APP_NAME = METADATA["name"] APPLICATION_NAME = "postgresql-test-app" @@ -45,8 +47,6 @@ except FileNotFoundError: KUBECTL = "microk8s kubectl" -charm = None - logger = logging.getLogger(__name__) @@ -72,37 +72,36 @@ async def app_name( async def build_and_deploy( ops_test: OpsTest, + charm, num_units: int, database_app_name: str = DATABASE_APP_NAME, wait_for_idle: bool = True, status: str = "active", model: Model = None, + extra_config: dict[str, str] | None = None, ) -> None: """Builds the charm and deploys a specified number of units.""" if model is None: model = ops_test.model + if not extra_config: + extra_config = {} # It is possible for users to provide their own cluster for testing. Hence, check if there # is a pre-existing cluster. if await app_name(ops_test, database_app_name, model): return - global charm - if not charm: - charm = await ops_test.build_charm(".") resources = { "postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"], } - ( - await model.deploy( - charm, - resources=resources, - application_name=database_app_name, - trust=True, - num_units=num_units, - base=CHARM_BASE, - config={"profile": "testing"}, - ), + await model.deploy( + charm, + resources=resources, + application_name=database_app_name, + trust=True, + num_units=num_units, + base=CHARM_BASE_NOBLE, + config={**extra_config, "profile": "testing"}, ) if wait_for_idle: # Wait until the PostgreSQL charm is successfully deployed. @@ -329,7 +328,7 @@ async def execute_query_on_unit( unit_address: str, password: str, query: str, - database: str = "postgres", + database: str = DATABASE_DEFAULT_NAME, sslmode: str | None = None, ): """Execute given PostgreSQL query on a unit. 
@@ -423,11 +422,9 @@ def get_expected_k8s_resources(application: str) -> set: f"Endpoints/patroni-{application}", f"Endpoints/patroni-{application}-config", f"Endpoints/patroni-{application}-sync", - f"Endpoints/{application}", f"Endpoints/{application}-primary", f"Endpoints/{application}-replicas", f"Service/patroni-{application}-config", - f"Service/{application}", f"Service/{application}-primary", f"Service/{application}-replicas", } @@ -760,7 +757,6 @@ async def switchover( ) assert response.status_code == 200, f"Switchover status code is {response.status_code}" app_name = current_primary.split("/")[0] - minority_count = len(ops_test.model.applications[app_name].units) // 2 for attempt in Retrying(stop=stop_after_attempt(30), wait=wait_fixed(2), reraise=True): with attempt: response = requests.get(f"http://{primary_ip}:8008/cluster") @@ -768,7 +764,7 @@ async def switchover( standbys = len([ member for member in response.json()["members"] if member["role"] == "sync_standby" ]) - assert standbys >= minority_count + assert standbys == len(ops_test.model.applications[app_name].units) - 1 async def wait_for_idle_on_blocked( @@ -822,6 +818,7 @@ async def cat_file_from_unit(ops_test: OpsTest, filepath: str, unit_name: str) - async def backup_operations( ops_test: OpsTest, + charm, s3_integrator_app_name: str, tls_certificates_app_name: str, tls_config, @@ -840,7 +837,9 @@ async def backup_operations( # as archivo_mode is disabled after restoring the backup) and to TLS Certificates Operator # (to be able to create backups from replicas). database_app_name = f"{DATABASE_APP_NAME}-{cloud.lower()}" - await build_and_deploy(ops_test, 2, database_app_name=database_app_name, wait_for_idle=False) + await build_and_deploy( + ops_test, charm, 2, database_app_name=database_app_name, wait_for_idle=False + ) await ops_test.model.relate(database_app_name, tls_certificates_app_name) async with ops_test.fast_forward(fast_interval="60s"): diff --git a/tests/integration/markers.py b/tests/integration/markers.py index 2cfeab1c4f..2f6cdd315c 100644 --- a/tests/integration/markers.py +++ b/tests/integration/markers.py @@ -7,7 +7,6 @@ from . import architecture from .juju_ import juju_major_version -juju2 = pytest.mark.skipif(juju_major_version != 2, reason="Requires juju 2") juju3 = pytest.mark.skipif(juju_major_version != 3, reason="Requires juju 3") juju_secrets = pytest.mark.skipif(juju_major_version < 3, reason="Requires juju secrets") amd64_only = pytest.mark.skipif( diff --git a/tests/integration/new_relations/test_new_relations.py b/tests/integration/new_relations/test_new_relations_1.py similarity index 76% rename from tests/integration/new_relations/test_new_relations.py rename to tests/integration/new_relations/test_new_relations_1.py index 61bcbfdaa4..a2be3ed502 100644 --- a/tests/integration/new_relations/test_new_relations.py +++ b/tests/integration/new_relations/test_new_relations_1.py @@ -1,11 +1,9 @@ -#!/usr/bin/env python3 # Copyright 2022 Canonical Ltd. # See LICENSE file for licensing details. import asyncio import logging import secrets import string -from asyncio import gather from pathlib import Path import psycopg2 @@ -14,7 +12,8 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_fixed -from .. 
import markers +from constants import DATABASE_DEFAULT_NAME + from ..helpers import ( CHARM_BASE, check_database_users_existence, @@ -44,9 +43,8 @@ INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles" -@pytest.mark.group("smoke") @pytest.mark.abort_on_fail -async def test_database_relation_with_charm_libraries(ops_test: OpsTest, database_charm): +async def test_database_relation_with_charm_libraries(ops_test: OpsTest, charm): """Test basic functionality of database relation interface.""" # Deploy both charms (multiple units for each application to test that later they correctly # set data in the relation application databag using only the leader unit). @@ -60,7 +58,7 @@ async def test_database_relation_with_charm_libraries(ops_test: OpsTest, databas channel="edge", ), ops_test.model.deploy( - database_charm, + charm, resources={ "postgresql-image": DATABASE_APP_METADATA["resources"]["postgresql-image"][ "upstream-source" @@ -73,7 +71,7 @@ async def test_database_relation_with_charm_libraries(ops_test: OpsTest, databas config={"profile": "testing"}, ), ops_test.model.deploy( - database_charm, + charm, resources={ "postgresql-image": DATABASE_APP_METADATA["resources"]["postgresql-image"][ "upstream-source" @@ -163,7 +161,6 @@ async def test_database_relation_with_charm_libraries(ops_test: OpsTest, databas cursor.execute("DROP TABLE test;") -@pytest.mark.group("smoke") async def test_user_with_extra_roles(ops_test: OpsTest): """Test superuser actions and the request for more permissions.""" # Get the connection string to connect to the database. @@ -184,7 +181,6 @@ async def test_user_with_extra_roles(ops_test: OpsTest): connection.close() -@pytest.mark.group("smoke") async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: OpsTest): """Test that two different application connect to the database with different credentials.""" # Set some variables to use in this test. @@ -224,7 +220,10 @@ async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: Op (another_application_app_name, f"{APPLICATION_APP_NAME.replace('-', '_')}_database"), ]: connection_string = await build_connection_string( - ops_test, application, FIRST_DATABASE_RELATION_NAME, database="postgres" + ops_test, + application, + FIRST_DATABASE_RELATION_NAME, + database=DATABASE_DEFAULT_NAME, ) with pytest.raises(psycopg2.Error): psycopg2.connect(connection_string) @@ -238,10 +237,7 @@ async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: Op psycopg2.connect(connection_string) -@pytest.mark.group("smoke") -async def test_an_application_can_connect_to_multiple_database_clusters( - ops_test: OpsTest, database_charm -): +async def test_an_application_can_connect_to_multiple_database_clusters(ops_test: OpsTest, charm): """Test that an application can connect to different clusters of the same database.""" # Relate the application with both database clusters # and wait for them exchanging some connection data. 
@@ -271,9 +267,8 @@ async def test_an_application_can_connect_to_multiple_database_clusters(
     assert application_connection_string != another_application_connection_string
 
 
-@pytest.mark.group("smoke")
 async def test_an_application_can_connect_to_multiple_aliased_database_clusters(
-    ops_test: OpsTest, database_charm
+    ops_test: OpsTest, charm
 ):
     """Test that an application can connect to different clusters of the same database."""
     # Relate the application with both database clusters
@@ -309,7 +304,6 @@ async def test_an_application_can_connect_to_multiple_aliased_database_clusters(
     assert application_connection_string != another_application_connection_string
 
 
-@pytest.mark.group("smoke")
 @pytest.mark.abort_on_fail
 async def test_an_application_can_request_multiple_databases(ops_test: OpsTest):
     """Test that an application can request additional databases using the same interface."""
@@ -333,7 +327,6 @@ async def test_an_application_can_request_multiple_databases(ops_test: OpsTest):
     assert first_database_connection_string != second_database_connection_string
 
 
-@pytest.mark.group("smoke")
 async def test_no_read_only_endpoint_in_standalone_cluster(ops_test: OpsTest):
     """Test that there is no read-only endpoint in a standalone cluster."""
     async with ops_test.fast_forward():
@@ -351,7 +344,6 @@ async def test_no_read_only_endpoint_in_standalone_cluster(ops_test: OpsTest):
     )
 
 
-@pytest.mark.group("smoke")
 async def test_read_only_endpoint_in_scaled_up_cluster(ops_test: OpsTest):
     """Test that there is read-only endpoint in a scaled up cluster."""
     async with ops_test.fast_forward():
@@ -369,7 +361,6 @@ async def test_read_only_endpoint_in_scaled_up_cluster(ops_test: OpsTest):
     )
 
 
-@pytest.mark.group("smoke")
 async def test_relation_broken(ops_test: OpsTest):
     """Test that the user is removed when the relation is broken."""
     async with ops_test.fast_forward():
@@ -390,7 +381,6 @@ async def test_relation_broken(ops_test: OpsTest):
     )
 
 
-@pytest.mark.group("smoke")
 async def test_restablish_relation(ops_test: OpsTest):
     """Test that a previously broken relation would be functional if restored."""
    # Relate the charms and wait for them exchanging some connection data.
@@ -428,7 +418,6 @@ async def test_restablish_relation(ops_test: OpsTest):
     assert data[0] == "other data"
 
 
-@pytest.mark.group("smoke")
 @pytest.mark.abort_on_fail
 async def test_relation_with_no_database_name(ops_test: OpsTest):
     """Test that a relation with no database name doesn't block the charm."""
@@ -446,7 +435,6 @@ async def test_relation_with_no_database_name(ops_test: OpsTest):
     await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", raise_on_blocked=True)
 
 
-@pytest.mark.group("smoke")
 @pytest.mark.abort_on_fail
 async def test_admin_role(ops_test: OpsTest):
     """Test that the admin role gives access to all the databases."""
@@ -465,7 +453,7 @@ async def test_admin_role(ops_test: OpsTest):
 
     # Check that the user can access all the databases.
     for database in [
-        "postgres",
+        DATABASE_DEFAULT_NAME,
         f"{APPLICATION_APP_NAME.replace('-', '_')}_database",
         "another_application_database",
     ]:
@@ -489,12 +477,12 @@ async def test_admin_role(ops_test: OpsTest):
             )
             assert version == data
 
-            # Write some data (it should fail in the "postgres" database).
+            # Write some data (it should fail in the default database).
random_name = ( f"test_{''.join(secrets.choice(string.ascii_lowercase) for _ in range(10))}" ) - should_fail = database == "postgres" - cursor.execute(f"CREATE TABLE {random_name}(data TEXT);") + should_fail = database == DATABASE_DEFAULT_NAME + cursor.execute(f"CREATE SCHEMA test; CREATE TABLE test.{random_name}(data TEXT);") if should_fail: assert False, ( f"failed to run a statement in the following database: {database}" @@ -511,7 +499,7 @@ async def test_admin_role(ops_test: OpsTest): # Test the creation and deletion of databases. connection_string = await build_connection_string( - ops_test, DATA_INTEGRATOR_APP_NAME, "postgresql", database="postgres" + ops_test, DATA_INTEGRATOR_APP_NAME, "postgresql", database=DATABASE_DEFAULT_NAME ) connection = psycopg2.connect(connection_string) connection.autocommit = True @@ -520,8 +508,10 @@ async def test_admin_role(ops_test: OpsTest): cursor.execute(f"CREATE DATABASE {random_name};") cursor.execute(f"DROP DATABASE {random_name};") try: - cursor.execute("DROP DATABASE postgres;") - assert False, "the admin extra user role was able to drop the `postgres` system database" + cursor.execute(f"DROP DATABASE {DATABASE_DEFAULT_NAME};") + assert False, ( + f"the admin extra user role was able to drop the `{DATABASE_DEFAULT_NAME}` system database" + ) except psycopg2.errors.InsufficientPrivilege: # Ignore the error, as the admin extra user role mustn't be able to drop # the "postgres" system database. @@ -530,7 +520,6 @@ async def test_admin_role(ops_test: OpsTest): connection.close() -@pytest.mark.group("smoke") async def test_invalid_extra_user_roles(ops_test: OpsTest): async with ops_test.fast_forward(): # Remove the relation between the database and the first data integrator. @@ -590,154 +579,3 @@ async def test_invalid_extra_user_roles(ops_test: OpsTest): raise_on_blocked=False, timeout=1000, ) - - -@pytest.mark.group("clientapps") -@pytest.mark.abort_on_fail -async def test_database_deploy_clientapps(ops_test: OpsTest, database_charm): - """Test basic functionality of database relation interface.""" - # Deploy both charms (multiple units for each application to test that later they correctly - # set data in the relation application databag using only the leader unit). - async with ops_test.fast_forward(): - await asyncio.gather( - ops_test.model.deploy( - database_charm, - resources={ - "postgresql-image": DATABASE_APP_METADATA["resources"]["postgresql-image"][ - "upstream-source" - ] - }, - application_name=DATABASE_APP_NAME, - num_units=3, - base=CHARM_BASE, - trust=True, - config={"profile": "testing"}, - ), - ) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME], - status="active", - raise_on_blocked=True, - raise_on_error=False, - timeout=1000, - ) - - -@pytest.mark.group("clientapps") -@markers.amd64_only # discourse-k8s charm not available for arm64 -async def test_discourse(ops_test: OpsTest): - # Deploy Discourse and Redis. - await gather( - ops_test.model.deploy(DISCOURSE_APP_NAME, application_name=DISCOURSE_APP_NAME), - ops_test.model.deploy( - REDIS_APP_NAME, application_name=REDIS_APP_NAME, channel="latest/edge", base=CHARM_BASE - ), - ) - - async with ops_test.fast_forward(): - # Enable the plugins/extensions required by Discourse. 
- logger.info("Enabling the plugins/extensions required by Discourse") - config = {"plugin_hstore_enable": "True", "plugin_pg_trgm_enable": "True"} - await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) - await gather( - ops_test.model.wait_for_idle(apps=[DISCOURSE_APP_NAME], status="waiting"), - ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, REDIS_APP_NAME], status="active" - ), - ) - # Add both relations to Discourse (PostgreSQL and Redis) - # and wait for it to be ready. - logger.info("Adding relations") - await gather( - ops_test.model.add_relation(DATABASE_APP_NAME, DISCOURSE_APP_NAME), - ops_test.model.add_relation(REDIS_APP_NAME, DISCOURSE_APP_NAME), - ) - await gather( - ops_test.model.wait_for_idle(apps=[DISCOURSE_APP_NAME], timeout=2000), - ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, REDIS_APP_NAME], status="active" - ), - ) - logger.info("Configuring Discourse") - config = { - "developer_emails": "noreply@canonical.com", - "external_hostname": "discourse-k8s", - "smtp_address": "test.local", - "smtp_domain": "test.local", - "s3_install_cors_rule": "false", - } - await ops_test.model.applications[DISCOURSE_APP_NAME].set_config(config) - await ops_test.model.wait_for_idle(apps=[DISCOURSE_APP_NAME], status="active") - - # Deploy a new discourse application (https://github.com/canonical/data-platform-libs/issues/118 - # prevents from re-relating the same Discourse application; Discourse uses the old secret and fails). - await ops_test.model.applications[DISCOURSE_APP_NAME].remove() - other_discourse_app_name = f"other-{DISCOURSE_APP_NAME}" - await ops_test.model.deploy(DISCOURSE_APP_NAME, application_name=other_discourse_app_name) - - # Add both relations to Discourse (PostgreSQL and Redis) - # and wait for it to be ready. 
- logger.info("Adding relations") - await gather( - ops_test.model.add_relation(DATABASE_APP_NAME, other_discourse_app_name), - ops_test.model.add_relation(REDIS_APP_NAME, other_discourse_app_name), - ) - await gather( - ops_test.model.wait_for_idle(apps=[other_discourse_app_name], timeout=2000), - ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, REDIS_APP_NAME], status="active" - ), - ) - logger.info("Configuring Discourse") - config = { - "developer_emails": "noreply@canonical.com", - "external_hostname": "discourse-k8s", - "smtp_address": "test.local", - "smtp_domain": "test.local", - "s3_install_cors_rule": "false", - } - await ops_test.model.applications[other_discourse_app_name].set_config(config) - await ops_test.model.wait_for_idle(apps=[other_discourse_app_name], status="active") - - -@pytest.mark.group("clientapps") -@markers.amd64_only # indico charm not available for arm64 -async def test_indico_datatabase(ops_test: OpsTest) -> None: - """Tests deploying and relating to the Indico charm.""" - async with ops_test.fast_forward(fast_interval="30s"): - await ops_test.model.deploy( - "indico", - channel="latest/edge", - application_name="indico", - num_units=1, - series="focal", - ) - await ops_test.model.deploy( - "redis-k8s", channel="stable", application_name="redis-broker", base="ubuntu@20.04" - ) - await ops_test.model.deploy( - "redis-k8s", channel="stable", application_name="redis-cache", base="ubuntu@20.04" - ) - await asyncio.gather( - ops_test.model.relate("redis-broker", "indico:redis-broker"), - ops_test.model.relate("redis-cache", "indico:redis-cache"), - ) - - # Wait for model to stabilise - await ops_test.model.wait_for_idle( - apps=["indico"], - status="waiting", - timeout=1000, - ) - - # Verify that the charm doesn't block when the extensions are enabled. - logger.info("Verifying that the charm doesn't block when the extensions are enabled") - config = {"plugin_pg_trgm_enable": "True", "plugin_unaccent_enable": "True"} - await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") - await ops_test.model.relate(DATABASE_APP_NAME, "indico") - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, "indico"], - status="active", - timeout=2000, - ) diff --git a/tests/integration/new_relations/test_new_relations_2.py b/tests/integration/new_relations/test_new_relations_2.py new file mode 100644 index 0000000000..5bcf102d34 --- /dev/null +++ b/tests/integration/new_relations/test_new_relations_2.py @@ -0,0 +1,180 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +import asyncio +import logging +from asyncio import gather +from pathlib import Path + +import pytest +import yaml +from pytest_operator.plugin import OpsTest + +from .. 
import markers +from ..helpers import ( + CHARM_BASE, +) + +logger = logging.getLogger(__name__) + +APPLICATION_APP_NAME = "postgresql-test-app" +DATABASE_APP_NAME = "database" +ANOTHER_DATABASE_APP_NAME = "another-database" +DATA_INTEGRATOR_APP_NAME = "data-integrator" +DISCOURSE_APP_NAME = "discourse-k8s" +REDIS_APP_NAME = "redis-k8s" +APP_NAMES = [APPLICATION_APP_NAME, DATABASE_APP_NAME, ANOTHER_DATABASE_APP_NAME] +DATABASE_APP_METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) +FIRST_DATABASE_RELATION_NAME = "database" +SECOND_DATABASE_RELATION_NAME = "second-database" +MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME = "multiple-database-clusters" +ALIASED_MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME = "aliased-multiple-database-clusters" +NO_DATABASE_RELATION_NAME = "no-database" +INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles" + + +@pytest.mark.abort_on_fail +async def test_database_deploy_clientapps(ops_test: OpsTest, charm): + """Test basic functionality of database relation interface.""" + # Deploy both charms (multiple units for each application to test that later they correctly + # set data in the relation application databag using only the leader unit). + async with ops_test.fast_forward(): + await asyncio.gather( + ops_test.model.deploy( + charm, + resources={ + "postgresql-image": DATABASE_APP_METADATA["resources"]["postgresql-image"][ + "upstream-source" + ] + }, + application_name=DATABASE_APP_NAME, + num_units=3, + base=CHARM_BASE, + trust=True, + config={"profile": "testing"}, + ), + ) + await ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME], + status="active", + raise_on_blocked=True, + raise_on_error=False, + timeout=1000, + ) + + +@markers.amd64_only # discourse-k8s charm not available for arm64 +async def test_discourse(ops_test: OpsTest): + # Deploy Discourse and Redis. + await gather( + ops_test.model.deploy(DISCOURSE_APP_NAME, application_name=DISCOURSE_APP_NAME), + ops_test.model.deploy( + REDIS_APP_NAME, application_name=REDIS_APP_NAME, channel="latest/edge", base=CHARM_BASE + ), + ) + + async with ops_test.fast_forward(): + # Enable the plugins/extensions required by Discourse. + logger.info("Enabling the plugins/extensions required by Discourse") + config = {"plugin_hstore_enable": "True", "plugin_pg_trgm_enable": "True"} + await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) + await gather( + ops_test.model.wait_for_idle(apps=[DISCOURSE_APP_NAME], status="waiting"), + ops_test.model.wait_for_idle( + apps=[DATABASE_APP_NAME, REDIS_APP_NAME], status="active" + ), + ) + # Add both relations to Discourse (PostgreSQL and Redis) + # and wait for it to be ready. 
+        logger.info("Adding relations")
+        await gather(
+            ops_test.model.add_relation(DATABASE_APP_NAME, DISCOURSE_APP_NAME),
+            ops_test.model.add_relation(REDIS_APP_NAME, DISCOURSE_APP_NAME),
+        )
+        await gather(
+            ops_test.model.wait_for_idle(apps=[DISCOURSE_APP_NAME], timeout=2000),
+            ops_test.model.wait_for_idle(
+                apps=[DATABASE_APP_NAME, REDIS_APP_NAME], status="active"
+            ),
+        )
+        logger.info("Configuring Discourse")
+        config = {
+            "developer_emails": "noreply@canonical.com",
+            "external_hostname": "discourse-k8s",
+            "smtp_address": "test.local",
+            "smtp_domain": "test.local",
+            "s3_install_cors_rule": "false",
+        }
+        await ops_test.model.applications[DISCOURSE_APP_NAME].set_config(config)
+        await ops_test.model.wait_for_idle(apps=[DISCOURSE_APP_NAME], status="active")
+
+        # Deploy a new discourse application (https://github.com/canonical/data-platform-libs/issues/118
+        # prevents re-relating the same Discourse application; Discourse uses the old secret and fails).
+        await ops_test.model.applications[DISCOURSE_APP_NAME].remove()
+        other_discourse_app_name = f"other-{DISCOURSE_APP_NAME}"
+        await ops_test.model.deploy(DISCOURSE_APP_NAME, application_name=other_discourse_app_name)
+
+        # Add both relations to Discourse (PostgreSQL and Redis)
+        # and wait for it to be ready.
+        logger.info("Adding relations")
+        await gather(
+            ops_test.model.add_relation(DATABASE_APP_NAME, other_discourse_app_name),
+            ops_test.model.add_relation(REDIS_APP_NAME, other_discourse_app_name),
+        )
+        await gather(
+            ops_test.model.wait_for_idle(apps=[other_discourse_app_name], timeout=2000),
+            ops_test.model.wait_for_idle(
+                apps=[DATABASE_APP_NAME, REDIS_APP_NAME], status="active"
+            ),
+        )
+        logger.info("Configuring Discourse")
+        config = {
+            "developer_emails": "noreply@canonical.com",
+            "external_hostname": "discourse-k8s",
+            "smtp_address": "test.local",
+            "smtp_domain": "test.local",
+            "s3_install_cors_rule": "false",
+        }
+        await ops_test.model.applications[other_discourse_app_name].set_config(config)
+        await ops_test.model.wait_for_idle(apps=[other_discourse_app_name], status="active")
+
+
+@markers.amd64_only  # indico charm not available for arm64
+async def test_indico_datatabase(ops_test: OpsTest) -> None:
+    """Tests deploying and relating to the Indico charm."""
+    async with ops_test.fast_forward(fast_interval="30s"):
+        await ops_test.model.deploy(
+            "indico",
+            channel="latest/edge",
+            application_name="indico",
+            num_units=1,
+            series="focal",
+        )
+        await ops_test.model.deploy(
+            "redis-k8s", channel="stable", application_name="redis-broker", base="ubuntu@20.04"
+        )
+        await ops_test.model.deploy(
+            "redis-k8s", channel="stable", application_name="redis-cache", base="ubuntu@20.04"
+        )
+        await asyncio.gather(
+            ops_test.model.relate("redis-broker", "indico:redis-broker"),
+            ops_test.model.relate("redis-cache", "indico:redis-cache"),
+        )
+
+        # Wait for model to stabilise
+        await ops_test.model.wait_for_idle(
+            apps=["indico"],
+            status="waiting",
+            timeout=1000,
+        )
+
+        # Verify that the charm doesn't block when the extensions are enabled.
+        logger.info("Verifying that the charm doesn't block when the extensions are enabled")
+        config = {"plugin_pg_trgm_enable": "True", "plugin_unaccent_enable": "True"}
+        await ops_test.model.applications[DATABASE_APP_NAME].set_config(config)
+        await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active")
+        await ops_test.model.relate(DATABASE_APP_NAME, "indico")
+        await ops_test.model.wait_for_idle(
+            apps=[DATABASE_APP_NAME, "indico"],
+            status="active",
+            timeout=2000,
+        )
diff --git a/tests/integration/new_relations/test_relations_coherence.py b/tests/integration/new_relations/test_relations_coherence.py
index aa489473d1..9b1dc66830 100644
--- a/tests/integration/new_relations/test_relations_coherence.py
+++ b/tests/integration/new_relations/test_relations_coherence.py
@@ -9,9 +9,11 @@
 import pytest
 from pytest_operator.plugin import OpsTest
 
+from constants import DATABASE_DEFAULT_NAME
+
 from ..helpers import CHARM_BASE, DATABASE_APP_NAME, build_and_deploy
 from .helpers import build_connection_string
-from .test_new_relations import DATA_INTEGRATOR_APP_NAME
+from .test_new_relations_1 import DATA_INTEGRATOR_APP_NAME
 
 logger = logging.getLogger(__name__)
@@ -20,12 +22,11 @@
 FIRST_DATABASE_RELATION_NAME = "database"
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_relations(ops_test: OpsTest, database_charm):
+async def test_relations(ops_test: OpsTest, charm):
     """Test that check relation data."""
     async with ops_test.fast_forward():
-        await build_and_deploy(ops_test, 1, DATABASE_APP_NAME)
+        await build_and_deploy(ops_test, charm, 1, DATABASE_APP_NAME)
 
         await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=3000)
@@ -121,14 +122,14 @@ async def test_relations(ops_test: OpsTest, database_charm):
 
         for database in [
             DATA_INTEGRATOR_APP_NAME.replace("-", "_"),
-            "postgres",
+            DATABASE_DEFAULT_NAME,
         ]:
             logger.info(f"connecting to the following database: {database}")
             connection_string = await build_connection_string(
                 ops_test, DATA_INTEGRATOR_APP_NAME, "postgresql", database=database
             )
             connection = None
-            should_fail = database == "postgres"
+            should_fail = database == DATABASE_DEFAULT_NAME
             try:
                 with psycopg2.connect(
                     connection_string
                 ) as connection, connection.cursor() as cursor:
                     cursor.execute(f"SELECT data FROM {random_name};")
                     data = cursor.fetchone()
                     assert data[0] == "some data"
 
-                    # Write some data (it should fail in the "postgres" database).
+                    # Write some data (it should fail in the default database).
                     random_name = f"test_{''.join(secrets.choice(string.ascii_lowercase) for _ in range(10))}"
                     cursor.execute(f"CREATE TABLE {random_name}(data TEXT);")
                     if should_fail:
diff --git a/tests/integration/relations/__init__.py b/tests/integration/relations/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/tests/integration/relations/helpers.py b/tests/integration/relations/helpers.py
deleted file mode 100644
index fc3f5e6b4c..0000000000
--- a/tests/integration/relations/helpers.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2024 Canonical Ltd.
-# See LICENSE file for licensing details.
- - -from ..helpers import METADATA -from ..new_relations.test_new_relations import ( - APPLICATION_APP_NAME, -) - -APP_NAME = METADATA["name"] -DB_RELATION = "db" -DATABASE_RELATION = "database" -FIRST_DATABASE_RELATION = "database" -APP_NAMES = [APP_NAME, APPLICATION_APP_NAME] diff --git a/tests/integration/relations/test_relations.py b/tests/integration/relations/test_relations.py deleted file mode 100644 index 0d33cf38ee..0000000000 --- a/tests/integration/relations/test_relations.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. -import asyncio -import logging - -import pytest -from pytest_operator.plugin import OpsTest - -from ..helpers import CHARM_BASE -from ..new_relations.test_new_relations import ( - APPLICATION_APP_NAME, - DATABASE_APP_METADATA, -) -from ..relations.helpers import ( - APP_NAME, - DATABASE_RELATION, - DB_RELATION, - FIRST_DATABASE_RELATION, -) - -logger = logging.getLogger(__name__) - - -@pytest.mark.group(1) -@pytest.mark.abort_on_fail -async def test_deploy_charms(ops_test: OpsTest, database_charm): - """Deploy both charms (application and database) to use in the tests.""" - # Deploy both charms (multiple units for each application to test that later they correctly - # set data in the relation application databag using only the leader unit). - async with ops_test.fast_forward(): - await asyncio.gather( - ops_test.model.deploy( - APPLICATION_APP_NAME, - application_name=APPLICATION_APP_NAME, - num_units=1, - base=CHARM_BASE, - channel="edge", - ), - ops_test.model.deploy( - database_charm, - resources={ - "postgresql-image": DATABASE_APP_METADATA["resources"]["postgresql-image"][ - "upstream-source" - ] - }, - application_name=APP_NAME, - num_units=1, - base=CHARM_BASE, - config={ - "profile": "testing", - "plugin_unaccent_enable": "True", - "plugin_pg_trgm_enable": "True", - }, - ), - ) - - await ops_test.model.wait_for_idle( - apps=[APP_NAME, APPLICATION_APP_NAME], status="active", timeout=3000 - ) - - -@pytest.mark.group(1) -async def test_legacy_and_modern_endpoints_simultaneously(ops_test: OpsTest): - await ops_test.model.relate(APPLICATION_APP_NAME, f"{APP_NAME}:{DB_RELATION}") - await ops_test.model.wait_for_idle( - status="active", - timeout=1500, - raise_on_error=False, - ) - - logger.info(" add relation with modern endpoints") - app = ops_test.model.applications[APP_NAME] - async with ops_test.fast_forward(): - await ops_test.model.relate(APP_NAME, f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION}") - await ops_test.model.block_until( - lambda: "blocked" in {unit.workload_status for unit in app.units}, - timeout=1500, - ) - - logger.info(" remove relation with legacy endpoints") - await ops_test.model.applications[APP_NAME].destroy_relation( - f"{APP_NAME}:{DB_RELATION}", f"{APPLICATION_APP_NAME}:{DB_RELATION}" - ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) - - logger.info(" add relation with legacy endpoints") - async with ops_test.fast_forward(): - await ops_test.model.relate(APPLICATION_APP_NAME, f"{APP_NAME}:{DB_RELATION}") - await ops_test.model.block_until( - lambda: "blocked" in {unit.workload_status for unit in app.units}, - timeout=1500, - ) - - logger.info(" remove relation with modern endpoints") - await ops_test.model.applications[APP_NAME].destroy_relation( - f"{APP_NAME}:{DATABASE_RELATION}", f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION}" - ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) - - 
logger.info(" remove relation with legacy endpoints") - await ops_test.model.applications[APP_NAME].destroy_relation( - f"{APP_NAME}:{DB_RELATION}", f"{APPLICATION_APP_NAME}:{DB_RELATION}" - ) - await ops_test.model.wait_for_idle(status="active", timeout=1500) - - logger.info(" add relation with modern endpoints") - await ops_test.model.relate(APP_NAME, f"{APPLICATION_APP_NAME}:{FIRST_DATABASE_RELATION}") - await ops_test.model.wait_for_idle(status="active", timeout=1500) diff --git a/tests/integration/test_audit.py b/tests/integration/test_audit.py index cd61538715..307b98cd61 100644 --- a/tests/integration/test_audit.py +++ b/tests/integration/test_audit.py @@ -22,11 +22,12 @@ RELATION_ENDPOINT = "database" -@pytest.mark.group(1) @pytest.mark.abort_on_fail -async def test_audit_plugin(ops_test: OpsTest) -> None: +async def test_audit_plugin(ops_test: OpsTest, charm) -> None: """Test the audit plugin.""" - await asyncio.gather(build_and_deploy(ops_test, 1), ops_test.model.deploy(APPLICATION_NAME)) + await asyncio.gather( + build_and_deploy(ops_test, charm, 1), ops_test.model.deploy(APPLICATION_NAME) + ) await ops_test.model.relate(f"{APPLICATION_NAME}:{RELATION_ENDPOINT}", DATABASE_APP_NAME) async with ops_test.fast_forward(): await ops_test.model.wait_for_idle( diff --git a/tests/integration/test_backups_aws.py b/tests/integration/test_backups_aws.py new file mode 100644 index 0000000000..76e82c5faa --- /dev/null +++ b/tests/integration/test_backups_aws.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +import logging + +import pytest +from pytest_operator.plugin import OpsTest +from tenacity import Retrying, stop_after_attempt, wait_exponential + +from . import architecture +from .conftest import AWS +from .helpers import ( + DATABASE_APP_NAME, + backup_operations, + db_connect, + get_password, + get_primary, + get_unit_address, + scale_application, + switchover, +) + +ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE = "the S3 repository has backups from another cluster" +FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE = ( + "failed to access/create the bucket, check your S3 settings" +) +FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE = "failed to initialize stanza, check your S3 settings" +S3_INTEGRATOR_APP_NAME = "s3-integrator" +tls_certificates_app_name = "self-signed-certificates" +tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" +tls_config = {"ca-common-name": "Test CA"} + +logger = logging.getLogger(__name__) + + +@pytest.mark.abort_on_fail +async def test_backup_aws(ops_test: OpsTest, charm, aws_cloud_configs: tuple[dict, dict]) -> None: + """Build and deploy two units of PostgreSQL in AWS and then test the backup and restore actions.""" + config = aws_cloud_configs[0] + credentials = aws_cloud_configs[1] + + await backup_operations( + ops_test, + charm, + S3_INTEGRATOR_APP_NAME, + tls_certificates_app_name, + tls_config, + tls_channel, + credentials, + AWS, + config, + ) + database_app_name = f"{DATABASE_APP_NAME}-aws" + + async with ops_test.fast_forward(): + logger.info("removing the TLS relation") + await ops_test.model.applications[database_app_name].remove_relation( + f"{database_app_name}:certificates", + f"{tls_certificates_app_name}:certificates", + ) + + new_unit_name = f"{database_app_name}/1" + + # Scale up to be able to test primary and leader being different. 
+    async with ops_test.fast_forward():
+        await scale_application(ops_test, database_app_name, 2)
+
+    logger.info("ensuring that the replication is working correctly")
+    address = await get_unit_address(ops_test, new_unit_name)
+    password = await get_password(ops_test, database_app_name=database_app_name)
+    patroni_password = await get_password(
+        ops_test, "patroni", database_app_name=database_app_name
+    )
+    for attempt in Retrying(
+        stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+    ):
+        with attempt:
+            with db_connect(
+                host=address, password=password
+            ) as connection, connection.cursor() as cursor:
+                cursor.execute(
+                    "SELECT EXISTS (SELECT FROM information_schema.tables"
+                    " WHERE table_schema = 'public' AND table_name = 'backup_table_1');"
+                )
+                assert cursor.fetchone()[0], (
+                    f"replication isn't working correctly: table 'backup_table_1' doesn't exist in {new_unit_name}"
+                )
+                cursor.execute(
+                    "SELECT EXISTS (SELECT FROM information_schema.tables"
+                    " WHERE table_schema = 'public' AND table_name = 'backup_table_2');"
+                )
+                assert not cursor.fetchone()[0], (
+                    f"replication isn't working correctly: table 'backup_table_2' exists in {new_unit_name}"
+                )
+            connection.close()
+
+    old_primary = await get_primary(ops_test, database_app_name)
+    logger.info(f"performing a switchover from {old_primary} to {new_unit_name}")
+    await switchover(ops_test, old_primary, patroni_password, new_unit_name)
+
+    logger.info("checking that the primary unit has changed")
+    for attempt in Retrying(
+        stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=30)
+    ):
+        with attempt:
+            primary = await get_primary(ops_test, database_app_name)
+            assert primary == new_unit_name
+
+    # Ensure stanza is working correctly.
+    logger.info("listing the available backups to ensure that the stanza is working correctly")
+    action = await ops_test.model.units.get(new_unit_name).run_action("list-backups")
+    await action.wait()
+    backups = action.results.get("backups")
+    assert backups, "backups not outputted"
+
+    await ops_test.model.wait_for_idle(status="active", timeout=1000)
+
+    # Remove the database app.
+    await ops_test.model.remove_application(database_app_name)
+    await ops_test.model.block_until(
+        lambda: database_app_name not in ops_test.model.applications, timeout=1000
+    )
+    # Remove the TLS operator.
+    await ops_test.model.remove_application(tls_certificates_app_name)
+    await ops_test.model.block_until(
+        lambda: tls_certificates_app_name not in ops_test.model.applications, timeout=1000
+    )
diff --git a/tests/integration/test_backups.py b/tests/integration/test_backups_gcp.py
similarity index 53%
rename from tests/integration/test_backups.py
rename to tests/integration/test_backups_gcp.py
index bf2a5ea469..b4882d1700 100644
--- a/tests/integration/test_backups.py
+++ b/tests/integration/test_backups_gcp.py
@@ -4,29 +4,24 @@
 import logging
 import uuid
 
-import boto3
-import pytest as pytest
+import pytest
 from lightkube.core.client import Client
 from lightkube.resources.core_v1 import Pod
 from pytest_operator.plugin import OpsTest
 from tenacity import Retrying, stop_after_attempt, wait_exponential
 
 from . 
import architecture +from .conftest import GCP from .helpers import ( DATABASE_APP_NAME, backup_operations, build_and_deploy, cat_file_from_unit, - construct_endpoint, db_connect, get_password, - get_primary, get_unit_address, - scale_application, - switchover, wait_for_idle_on_blocked, ) -from .juju_ import juju_major_version ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE = "the S3 repository has backups from another cluster" FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE = ( @@ -34,169 +29,22 @@ ) FAILED_TO_INITIALIZE_STANZA_ERROR_MESSAGE = "failed to initialize stanza, check your S3 settings" S3_INTEGRATOR_APP_NAME = "s3-integrator" -if juju_major_version < 3: - tls_certificates_app_name = "tls-certificates-operator" - tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" - tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} -else: - tls_certificates_app_name = "self-signed-certificates" - tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" - tls_config = {"ca-common-name": "Test CA"} +tls_certificates_app_name = "self-signed-certificates" +tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" +tls_config = {"ca-common-name": "Test CA"} logger = logging.getLogger(__name__) -AWS = "AWS" -GCP = "GCP" - -@pytest.fixture(scope="module") -async def cloud_configs(ops_test: OpsTest, github_secrets) -> None: - # Define some configurations and credentials. - configs = { - AWS: { - "endpoint": "https://s3.amazonaws.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-k8s/{uuid.uuid1()}", - "region": "us-east-1", - }, - GCP: { - "endpoint": "https://storage.googleapis.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-k8s/{uuid.uuid1()}", - "region": "", - }, - } - credentials = { - AWS: { - "access-key": github_secrets["AWS_ACCESS_KEY"], - "secret-key": github_secrets["AWS_SECRET_KEY"], - }, - GCP: { - "access-key": github_secrets["GCP_ACCESS_KEY"], - "secret-key": github_secrets["GCP_SECRET_KEY"], - }, - } - yield configs, credentials - # Delete the previously created objects. - logger.info("deleting the previously created backups") - for cloud, config in configs.items(): - session = boto3.session.Session( - aws_access_key_id=credentials[cloud]["access-key"], - aws_secret_access_key=credentials[cloud]["secret-key"], - region_name=config["region"], - ) - s3 = session.resource( - "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"]) - ) - bucket = s3.Bucket(config["bucket"]) - # GCS doesn't support batch delete operation, so delete the objects one by one. 
- for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")): - bucket_object.delete() - - -@pytest.mark.group("AWS") @pytest.mark.abort_on_fail -async def test_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict]) -> None: - """Build and deploy two units of PostgreSQL in AWS and then test the backup and restore actions.""" - config = cloud_configs[0][AWS] - credentials = cloud_configs[1][AWS] - - await backup_operations( - ops_test, - S3_INTEGRATOR_APP_NAME, - tls_certificates_app_name, - tls_config, - tls_channel, - credentials, - AWS, - config, - ) - database_app_name = f"{DATABASE_APP_NAME}-aws" - - async with ops_test.fast_forward(): - logger.info("removing the TLS relation") - await ops_test.model.applications[database_app_name].remove_relation( - f"{database_app_name}:certificates", - f"{tls_certificates_app_name}:certificates", - ) - - new_unit_name = f"{database_app_name}/1" - - # Scale up to be able to test primary and leader being different. - async with ops_test.fast_forward(): - await scale_application(ops_test, database_app_name, 2) - - logger.info("ensuring that the replication is working correctly") - address = await get_unit_address(ops_test, new_unit_name) - password = await get_password(ops_test, database_app_name=database_app_name) - patroni_password = await get_password( - ops_test, "patroni", database_app_name=database_app_name - ) - for attempt in Retrying( - stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30) - ): - with attempt: - with db_connect( - host=address, password=password - ) as connection, connection.cursor() as cursor: - cursor.execute( - "SELECT EXISTS (SELECT FROM information_schema.tables" - " WHERE table_schema = 'public' AND table_name = 'backup_table_1');" - ) - assert cursor.fetchone()[0], ( - f"replication isn't working correctly: table 'backup_table_1' doesn't exist in {new_unit_name}" - ) - cursor.execute( - "SELECT EXISTS (SELECT FROM information_schema.tables" - " WHERE table_schema = 'public' AND table_name = 'backup_table_2');" - ) - assert not cursor.fetchone()[0], ( - f"replication isn't working correctly: table 'backup_table_2' exists in {new_unit_name}" - ) - connection.close() - - old_primary = await get_primary(ops_test, database_app_name) - logger.info(f"performing a switchover from {old_primary} to {new_unit_name}") - await switchover(ops_test, old_primary, patroni_password, new_unit_name) - - logger.info("checking that the primary unit has changed") - primary = await get_primary(ops_test, database_app_name) - for attempt in Retrying( - stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=30) - ): - with attempt: - assert primary == new_unit_name - - # Ensure stanza is working correctly. - logger.info("listing the available backups to ensure that the stanza is working correctly") - action = await ops_test.model.units.get(new_unit_name).run_action("list-backups") - await action.wait() - backups = action.results.get("backups") - assert backups, "backups not outputted" - - await ops_test.model.wait_for_idle(status="active", timeout=1000) - - # Remove the database app. - await ops_test.model.remove_application(database_app_name) - await ops_test.model.block_until( - lambda: database_app_name not in ops_test.model.applications, timeout=1000 - ) - # Remove the TLS operator. 
- await ops_test.model.remove_application(tls_certificates_app_name) - await ops_test.model.block_until( - lambda: tls_certificates_app_name not in ops_test.model.applications, timeout=1000 - ) - - -@pytest.mark.group("GCP") -@pytest.mark.abort_on_fail -async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict]) -> None: +async def test_backup_gcp(ops_test: OpsTest, charm, gcp_cloud_configs: tuple[dict, dict]) -> None: """Build and deploy two units of PostgreSQL in GCP and then test the backup and restore actions.""" - config = cloud_configs[0][GCP] - credentials = cloud_configs[1][GCP] + config = gcp_cloud_configs[0] + credentials = gcp_cloud_configs[1] await backup_operations( ops_test, + charm, S3_INTEGRATOR_APP_NAME, tls_certificates_app_name, tls_config, @@ -219,15 +67,18 @@ async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict]) - ) -@pytest.mark.group("GCP") -async def test_restore_on_new_cluster(ops_test: OpsTest, github_secrets) -> None: +async def test_restore_on_new_cluster( + ops_test: OpsTest, charm, gcp_cloud_configs: tuple[dict, dict] +) -> None: """Test that is possible to restore a backup to another PostgreSQL cluster.""" previous_database_app_name = f"{DATABASE_APP_NAME}-gcp" database_app_name = f"new-{DATABASE_APP_NAME}" await build_and_deploy( - ops_test, 1, database_app_name=previous_database_app_name, wait_for_idle=False + ops_test, charm, 1, database_app_name=previous_database_app_name, wait_for_idle=False + ) + await build_and_deploy( + ops_test, charm, 1, database_app_name=database_app_name, wait_for_idle=False ) - await build_and_deploy(ops_test, 1, database_app_name=database_app_name, wait_for_idle=False) await ops_test.model.relate(previous_database_app_name, S3_INTEGRATOR_APP_NAME) await ops_test.model.relate(database_app_name, S3_INTEGRATOR_APP_NAME) async with ops_test.fast_forward(): @@ -311,9 +162,8 @@ async def test_restore_on_new_cluster(ops_test: OpsTest, github_secrets) -> None connection.close() -@pytest.mark.group("GCP") async def test_invalid_config_and_recovery_after_fixing_it( - ops_test: OpsTest, cloud_configs: tuple[dict, dict] + ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict] ) -> None: """Test that the charm can handle invalid and valid backup configurations.""" database_app_name = f"new-{DATABASE_APP_NAME}" @@ -347,10 +197,10 @@ async def test_invalid_config_and_recovery_after_fixing_it( logger.info( "configuring S3 integrator for a valid cloud, but with the path of another cluster repository" ) - await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(cloud_configs[0][GCP]) + await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(gcp_cloud_configs[0]) action = await ops_test.model.units.get(f"{S3_INTEGRATOR_APP_NAME}/0").run_action( "sync-s3-credentials", - **cloud_configs[1][GCP], + **gcp_cloud_configs[1], ) await action.wait() await wait_for_idle_on_blocked( @@ -363,7 +213,7 @@ async def test_invalid_config_and_recovery_after_fixing_it( # Provide valid backup configurations, with another path in the S3 bucket. 
logger.info("configuring S3 integrator for a valid cloud") - config = cloud_configs[0][GCP].copy() + config = gcp_cloud_configs[0].copy() config["path"] = f"/postgresql-k8s/{uuid.uuid1()}" await ops_test.model.applications[S3_INTEGRATOR_APP_NAME].set_config(config) logger.info("waiting for the database charm to become active") @@ -372,8 +222,7 @@ async def test_invalid_config_and_recovery_after_fixing_it( ) -@pytest.mark.group("GCP") -async def test_delete_pod(ops_test: OpsTest, github_secrets) -> None: +async def test_delete_pod(ops_test: OpsTest, gcp_cloud_configs: tuple[dict, dict]) -> None: logger.info("Getting original backup config") database_app_name = f"new-{DATABASE_APP_NAME}" original_pgbackrest_config = await cat_file_from_unit( diff --git a/tests/integration/test_backups_pitr.py b/tests/integration/test_backups_pitr_aws.py similarity index 82% rename from tests/integration/test_backups_pitr.py rename to tests/integration/test_backups_pitr_aws.py index 2ea0d66d86..c4d272649e 100644 --- a/tests/integration/test_backups_pitr.py +++ b/tests/integration/test_backups_pitr_aws.py @@ -2,90 +2,35 @@ # Copyright 2023 Canonical Ltd. # See LICENSE file for licensing details. import logging -import uuid -import boto3 -import pytest as pytest +import pytest from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_attempt, wait_exponential from . import architecture +from .conftest import AWS from .helpers import ( DATABASE_APP_NAME, build_and_deploy, - construct_endpoint, db_connect, get_password, get_primary, get_unit_address, scale_application, ) -from .juju_ import juju_major_version CANNOT_RESTORE_PITR = "cannot restore PITR, juju debug-log for details" S3_INTEGRATOR_APP_NAME = "s3-integrator" -if juju_major_version < 3: - tls_certificates_app_name = "tls-certificates-operator" - tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" - tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} -else: - tls_certificates_app_name = "self-signed-certificates" - tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" - tls_config = {"ca-common-name": "Test CA"} +tls_certificates_app_name = "self-signed-certificates" +tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" +tls_config = {"ca-common-name": "Test CA"} logger = logging.getLogger(__name__) -AWS = "AWS" -GCP = "GCP" - - -@pytest.fixture(scope="module") -async def cloud_configs(ops_test: OpsTest, github_secrets) -> None: - # Define some configurations and credentials. - configs = { - AWS: { - "endpoint": "https://s3.amazonaws.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-k8s/{uuid.uuid1()}", - "region": "us-east-1", - }, - GCP: { - "endpoint": "https://storage.googleapis.com", - "bucket": "data-charms-testing", - "path": f"/postgresql-k8s/{uuid.uuid1()}", - "region": "", - }, - } - credentials = { - AWS: { - "access-key": github_secrets["AWS_ACCESS_KEY"], - "secret-key": github_secrets["AWS_SECRET_KEY"], - }, - GCP: { - "access-key": github_secrets["GCP_ACCESS_KEY"], - "secret-key": github_secrets["GCP_SECRET_KEY"], - }, - } - yield configs, credentials - # Delete the previously created objects. 
- logger.info("deleting the previously created backups") - for cloud, config in configs.items(): - session = boto3.session.Session( - aws_access_key_id=credentials[cloud]["access-key"], - aws_secret_access_key=credentials[cloud]["secret-key"], - region_name=config["region"], - ) - s3 = session.resource( - "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"]) - ) - bucket = s3.Bucket(config["bucket"]) - # GCS doesn't support batch delete operation, so delete the objects one by one. - for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")): - bucket_object.delete() - async def pitr_backup_operations( ops_test: OpsTest, + charm, s3_integrator_app_name: str, tls_certificates_app_name: str, tls_config, @@ -108,7 +53,9 @@ async def pitr_backup_operations( logger.info("deploying the next charms: s3-integrator, self-signed-certificates, postgresql") await ops_test.model.deploy(s3_integrator_app_name) await ops_test.model.deploy(tls_certificates_app_name, config=tls_config, channel=tls_channel) - await build_and_deploy(ops_test, 2, database_app_name=database_app_name, wait_for_idle=False) + await build_and_deploy( + ops_test, charm, 2, database_app_name=database_app_name, wait_for_idle=False + ) logger.info( "integrating self-signed-certificates with postgresql and waiting them to stabilize" @@ -379,36 +326,18 @@ async def pitr_backup_operations( ) -@pytest.mark.group("AWS") @pytest.mark.abort_on_fail -async def test_pitr_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict]) -> None: +async def test_pitr_backup_aws( + ops_test: OpsTest, charm, aws_cloud_configs: tuple[dict, dict] +) -> None: """Build and deploy two units of PostgreSQL in AWS and then test PITR backup and restore actions.""" - config = cloud_configs[0][AWS] - credentials = cloud_configs[1][AWS] + config = aws_cloud_configs[0] + credentials = aws_cloud_configs[1] cloud = AWS.lower() await pitr_backup_operations( ops_test, - S3_INTEGRATOR_APP_NAME, - tls_certificates_app_name, - tls_config, - tls_channel, - credentials, - cloud, - config, - ) - - -@pytest.mark.group("GCP") -@pytest.mark.abort_on_fail -async def test_pitr_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict]) -> None: - """Build and deploy two units of PostgreSQL in GCP and then test PITR backup and restore actions.""" - config = cloud_configs[0][GCP] - credentials = cloud_configs[1][GCP] - cloud = GCP.lower() - - await pitr_backup_operations( - ops_test, + charm, S3_INTEGRATOR_APP_NAME, tls_certificates_app_name, tls_config, diff --git a/tests/integration/test_backups_pitr_gcp.py b/tests/integration/test_backups_pitr_gcp.py new file mode 100644 index 0000000000..b6f1930bce --- /dev/null +++ b/tests/integration/test_backups_pitr_gcp.py @@ -0,0 +1,394 @@ +#!/usr/bin/env python3 +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. +import logging + +import pytest +from pytest_operator.plugin import OpsTest +from tenacity import Retrying, stop_after_attempt, wait_exponential + +from . 
import architecture
+from .conftest import GCP
+from .helpers import (
+    DATABASE_APP_NAME,
+    build_and_deploy,
+    db_connect,
+    get_password,
+    get_primary,
+    get_unit_address,
+    scale_application,
+)
+
+CANNOT_RESTORE_PITR = "cannot restore PITR, juju debug-log for details"
+S3_INTEGRATOR_APP_NAME = "s3-integrator"
+tls_certificates_app_name = "self-signed-certificates"
+tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable"
+tls_config = {"ca-common-name": "Test CA"}
+
+logger = logging.getLogger(__name__)
+
+
+async def pitr_backup_operations(
+    ops_test: OpsTest,
+    charm,
+    s3_integrator_app_name: str,
+    tls_certificates_app_name: str,
+    tls_config,
+    tls_channel,
+    credentials,
+    cloud,
+    config,
+) -> None:
+    """Utility function with the PITR backup and timeline management operations for both cloud tests.
+
+    The steps below are described in the format "(timeline): action_1 -> action_2".
+    1: table -> backup_b1 -> test_data_td1 -> timestamp_ts1 -> test_data_td2 -> restore_ts1 => 2
+    2: check_td1 -> check_not_td2 -> test_data_td3 -> timestamp_ts2 -> test_data_td4 -> restore_b1_latest => 3
+    3: check_td1 -> check_td2 -> check_not_td3 -> check_not_td4 -> restore_t2_latest => 4
+    4: check_td1 -> check_not_td2 -> check_td3 -> check_td4 -> restore_ts2 => 5
+    5: check_td1 -> check_not_td2 -> check_td3 -> check_not_td4
+    """
+    # Set up the environment.
+    database_app_name = f"{DATABASE_APP_NAME}-{cloud}"
+
+    logger.info("deploying charms: s3-integrator, self-signed-certificates, postgresql")
+    await ops_test.model.deploy(s3_integrator_app_name)
+    await ops_test.model.deploy(tls_certificates_app_name, config=tls_config, channel=tls_channel)
+    await build_and_deploy(
+        ops_test, charm, 2, database_app_name=database_app_name, wait_for_idle=False
+    )
+
+    logger.info(
+        "integrating self-signed-certificates with postgresql and waiting for them to stabilize"
+    )
+    await ops_test.model.relate(database_app_name, tls_certificates_app_name)
+    async with ops_test.fast_forward(fast_interval="60s"):
+        await ops_test.model.wait_for_idle(
+            apps=[database_app_name, tls_certificates_app_name],
+            status="active",
+            timeout=1000,
+            raise_on_error=False,
+        )
+
+    # Configure and set access and secret keys.
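+    # (Note: s3-integrator receives its non-secret settings (endpoint, bucket,
+    # path, region) via charm config and the credentials via its
+    # `sync-s3-credentials` action; roughly the programmatic equivalent of:
+    #   juju config s3-integrator endpoint=... bucket=... path=... region=...
+    #   juju run s3-integrator/0 sync-s3-credentials access-key=... secret-key=...
+    # which is what set_config() and run_action() do below.)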
+    logger.info(f"configuring S3 integrator for {cloud}")
+    await ops_test.model.applications[s3_integrator_app_name].set_config(config)
+    action = await ops_test.model.units.get(f"{s3_integrator_app_name}/0").run_action(
+        "sync-s3-credentials",
+        **credentials,
+    )
+    await action.wait()
+
+    logger.info("integrating s3-integrator with postgresql and waiting for the model to stabilize")
+    await ops_test.model.relate(database_app_name, s3_integrator_app_name)
+    async with ops_test.fast_forward(fast_interval="60s"):
+        await ops_test.model.wait_for_idle(status="active", timeout=1000)
+
+    primary = await get_primary(ops_test, database_app_name)
+    for unit in ops_test.model.applications[database_app_name].units:
+        if unit.name != primary:
+            replica = unit.name
+            break
+    password = await get_password(ops_test, database_app_name=database_app_name)
+    address = await get_unit_address(ops_test, primary)
+
+    logger.info("1: creating table")
+    _create_table(address, password)
+
+    logger.info("1: creating backup b1")
+    action = await ops_test.model.units.get(replica).run_action("create-backup")
+    await action.wait()
+    backup_status = action.results.get("backup-status")
+    assert backup_status, "backup hasn't succeeded"
+    async with ops_test.fast_forward():
+        await ops_test.model.wait_for_idle(status="active", timeout=1000)
+    backup_b1 = await _get_most_recent_backup(ops_test, ops_test.model.units.get(replica))
+
+    logger.info("1: creating test data td1")
+    _insert_test_data("test_data_td1", address, password)
+
+    logger.info("1: get timestamp ts1")
+    with db_connect(host=address, password=password) as connection, connection.cursor() as cursor:
+        cursor.execute("SELECT current_timestamp;")
+        timestamp_ts1 = str(cursor.fetchone()[0])
+    connection.close()
+    # A deliberately wrong timestamp pointing one year ahead, so it can never be reached.
+    unreachable_timestamp_ts1 = timestamp_ts1.replace(
+        timestamp_ts1[:4], str(int(timestamp_ts1[:4]) + 1), 1
+    )
+
+    logger.info("1: creating test data td2")
+    _insert_test_data("test_data_td2", address, password)
+
+    logger.info("1: switching wal")
+    _switch_wal(address, password)
+
+    logger.info("1: scaling down to do restore")
+    async with ops_test.fast_forward(fast_interval="60s"):
+        await scale_application(ops_test, database_app_name, 1)
+    remaining_unit = ops_test.model.units.get(f"{database_app_name}/0")
+
+    logger.info("1: restoring the backup b1 with a bad restore-to-time parameter")
+    action = await ops_test.model.units.get(f"{database_app_name}/0").run_action(
+        "restore", **{"backup-id": backup_b1, "restore-to-time": "bad data"}
+    )
+    await action.wait()
+    assert action.status == "failed", (
+        "1: restore must fail with a bad restore-to-time parameter, but the action succeeded"
+    )
+
+    logger.info("1: restoring the backup b1 with an unreachable restore-to-time parameter")
+    action = await ops_test.model.units.get(f"{database_app_name}/0").run_action(
+        "restore", **{"backup-id": backup_b1, "restore-to-time": unreachable_timestamp_ts1}
+    )
+    await action.wait()
+    logger.info("1: waiting for the database charm to become blocked after restore")
+    async with ops_test.fast_forward():
+        await ops_test.model.block_until(
+            lambda: ops_test.model.units.get(f"{database_app_name}/0").workload_status_message
+            == CANNOT_RESTORE_PITR,
+            timeout=1000,
+        )
+    logger.info(
+        "1: database charm became blocked after the restore, as expected with an unreachable PITR parameter"
+    )
+
+    for attempt in Retrying(
+        stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+    ):
+        with attempt:
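+            # (The restore is wrapped in a retry loop because the unit may still
+            # be settling after the failed PITR attempt above, so the action can
+            # fail transiently.)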
+            logger.info("1: restoring to the timestamp ts1")
+            action = await remaining_unit.run_action(
+                "restore", **{"restore-to-time": timestamp_ts1}
+            )
+            await action.wait()
+            restore_status = action.results.get("restore-status")
+            assert restore_status, "1: restore to the timestamp ts1 hasn't succeeded"
+    await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+    logger.info("2: successful restore")
+    primary = await get_primary(ops_test, database_app_name)
+    address = await get_unit_address(ops_test, primary)
+    timeline_t2 = await _get_most_recent_backup(ops_test, remaining_unit)
+    assert backup_b1 != timeline_t2, (
+        "2: timeline 2 does not appear in the list-backups output or is invalid"
+    )
+
+    logger.info("2: checking test data td1")
+    assert _check_test_data("test_data_td1", address, password), "2: test data td1 should exist"
+
+    logger.info("2: checking not test data td2")
+    assert not _check_test_data("test_data_td2", address, password), (
+        "2: test data td2 shouldn't exist"
+    )
+
+    logger.info("2: creating test data td3")
+    _insert_test_data("test_data_td3", address, password)
+
+    logger.info("2: get timestamp ts2")
+    with db_connect(host=address, password=password) as connection, connection.cursor() as cursor:
+        cursor.execute("SELECT current_timestamp;")
+        timestamp_ts2 = str(cursor.fetchone()[0])
+    connection.close()
+
+    logger.info("2: creating test data td4")
+    _insert_test_data("test_data_td4", address, password)
+
+    logger.info("2: switching wal")
+    _switch_wal(address, password)
+
+    for attempt in Retrying(
+        stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+    ):
+        with attempt:
+            logger.info("2: restoring the backup b1 to the latest")
+            action = await remaining_unit.run_action(
+                "restore", **{"backup-id": backup_b1, "restore-to-time": "latest"}
+            )
+            await action.wait()
+            restore_status = action.results.get("restore-status")
+            assert restore_status, "2: restoring backup b1 to the latest hasn't succeeded"
+    await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+    logger.info("3: successful restore")
+    primary = await get_primary(ops_test, database_app_name)
+    address = await get_unit_address(ops_test, primary)
+    timeline_t3 = await _get_most_recent_backup(ops_test, remaining_unit)
+    assert backup_b1 != timeline_t3 and timeline_t2 != timeline_t3, (
+        "3: timeline 3 does not appear in the list-backups output or is invalid"
+    )
+
+    logger.info("3: checking test data td1")
+    assert _check_test_data("test_data_td1", address, password), "3: test data td1 should exist"
+
+    logger.info("3: checking test data td2")
+    assert _check_test_data("test_data_td2", address, password), "3: test data td2 should exist"
+
+    logger.info("3: checking not test data td3")
+    assert not _check_test_data("test_data_td3", address, password), (
+        "3: test data td3 shouldn't exist"
+    )
+
+    logger.info("3: checking not test data td4")
+    assert not _check_test_data("test_data_td4", address, password), (
+        "3: test data td4 shouldn't exist"
+    )
+
+    logger.info("3: switching wal")
+    _switch_wal(address, password)
+
+    for attempt in Retrying(
+        stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+    ):
+        with attempt:
+            logger.info("3: restoring the timeline 2 to the latest")
+            action = await remaining_unit.run_action(
+                "restore", **{"backup-id": timeline_t2, "restore-to-time": "latest"}
+            )
+            await action.wait()
+            restore_status = action.results.get("restore-status")
+            assert restore_status, "3: restoring timeline 2 to the latest hasn't succeeded"
+    await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+    logger.info("4: successful restore")
+    primary = await get_primary(ops_test, database_app_name)
+    address = await get_unit_address(ops_test, primary)
+    timeline_t4 = await _get_most_recent_backup(ops_test, remaining_unit)
+    assert (
+        backup_b1 != timeline_t4 and timeline_t2 != timeline_t4 and timeline_t3 != timeline_t4
+    ), "4: timeline 4 does not appear in the list-backups output or is invalid"
+
+    logger.info("4: checking test data td1")
+    assert _check_test_data("test_data_td1", address, password), "4: test data td1 should exist"
+
+    logger.info("4: checking not test data td2")
+    assert not _check_test_data("test_data_td2", address, password), (
+        "4: test data td2 shouldn't exist"
+    )
+
+    logger.info("4: checking test data td3")
+    assert _check_test_data("test_data_td3", address, password), "4: test data td3 should exist"
+
+    logger.info("4: checking test data td4")
+    assert _check_test_data("test_data_td4", address, password), "4: test data td4 should exist"
+
+    logger.info("4: switching wal")
+    _switch_wal(address, password)
+
+    for attempt in Retrying(
+        stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+    ):
+        with attempt:
+            logger.info("4: restoring to the timestamp ts2")
+            action = await remaining_unit.run_action(
+                "restore", **{"restore-to-time": timestamp_ts2}
+            )
+            await action.wait()
+            restore_status = action.results.get("restore-status")
+            assert restore_status, "4: restore to the timestamp ts2 hasn't succeeded"
+    await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+    logger.info("5: successful restore")
+    primary = await get_primary(ops_test, database_app_name)
+    address = await get_unit_address(ops_test, primary)
+    timeline_t5 = await _get_most_recent_backup(ops_test, remaining_unit)
+    assert (
+        backup_b1 != timeline_t5
+        and timeline_t2 != timeline_t5
+        and timeline_t3 != timeline_t5
+        and timeline_t4 != timeline_t5
+    ), "5: timeline 5 does not appear in the list-backups output or is invalid"
+
+    logger.info("5: checking test data td1")
+    assert _check_test_data("test_data_td1", address, password), "5: test data td1 should exist"
+
+    logger.info("5: checking not test data td2")
+    assert not _check_test_data("test_data_td2", address, password), (
+        "5: test data td2 shouldn't exist"
+    )
+
+    logger.info("5: checking test data td3")
+    assert _check_test_data("test_data_td3", address, password), "5: test data td3 should exist"
+
+    logger.info("5: checking not test data td4")
+    assert not _check_test_data("test_data_td4", address, password), (
+        "5: test data td4 shouldn't exist"
+    )
+
+    await ops_test.model.wait_for_idle(status="active", timeout=1000)
+
+    # Remove the database app.
+    await ops_test.model.remove_application(database_app_name)
+    await ops_test.model.block_until(
+        lambda: database_app_name not in ops_test.model.applications, timeout=1000
+    )
+    # Remove the TLS operator.
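+    # (block_until on the application disappearing from the model is used for
+    # teardown because wait_for_idle cannot track an application that is being
+    # removed.)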
+ await ops_test.model.remove_application(tls_certificates_app_name) + await ops_test.model.block_until( + lambda: tls_certificates_app_name not in ops_test.model.applications, timeout=1000 + ) + + +@pytest.mark.abort_on_fail +async def test_pitr_backup_gcp( + ops_test: OpsTest, charm, gcp_cloud_configs: tuple[dict, dict] +) -> None: + """Build and deploy two units of PostgreSQL in GCP and then test PITR backup and restore actions.""" + config = gcp_cloud_configs[0] + credentials = gcp_cloud_configs[1] + cloud = GCP.lower() + + await pitr_backup_operations( + ops_test, + charm, + S3_INTEGRATOR_APP_NAME, + tls_certificates_app_name, + tls_config, + tls_channel, + credentials, + cloud, + config, + ) + + +def _create_table(host: str, password: str): + with db_connect(host=host, password=password) as connection: + connection.autocommit = True + connection.cursor().execute("CREATE TABLE IF NOT EXISTS backup_table (test_column TEXT);") + connection.close() + + +def _insert_test_data(td: str, host: str, password: str): + with db_connect(host=host, password=password) as connection: + connection.autocommit = True + connection.cursor().execute( + "INSERT INTO backup_table (test_column) VALUES (%s);", + (td,), + ) + connection.close() + + +def _check_test_data(td: str, host: str, password: str) -> bool: + with db_connect(host=host, password=password) as connection, connection.cursor() as cursor: + cursor.execute( + "SELECT EXISTS (SELECT 1 FROM backup_table WHERE test_column = %s);", + (td,), + ) + res = cursor.fetchone()[0] + connection.close() + return res + + +def _switch_wal(host: str, password: str): + with db_connect(host=host, password=password) as connection: + connection.autocommit = True + connection.cursor().execute("SELECT pg_switch_wal();") + connection.close() + + +async def _get_most_recent_backup(ops_test: OpsTest, unit: any) -> str: + logger.info("listing the available backups") + action = await unit.run_action("list-backups") + await action.wait() + backups = action.results.get("backups") + assert backups, "backups not outputted" + await ops_test.model.wait_for_idle(status="active", timeout=1000) + most_recent_backup = backups.split("\n")[-1] + return most_recent_backup.split()[0] diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 9e29ac16d0..9b8f444e51 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -13,6 +13,7 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_delay, wait_fixed +from .ha_tests.helpers import get_cluster_roles from .helpers import ( CHARM_BASE, METADATA, @@ -36,21 +37,19 @@ UNIT_IDS = [0, 1, 2] -@pytest.mark.group(1) @pytest.mark.abort_on_fail @pytest.mark.skip_if_deployed -async def test_build_and_deploy(ops_test: OpsTest): +async def test_build_and_deploy(ops_test: OpsTest, charm): """Build the charm-under-test and deploy it. Assert on the unit status before any relations/configurations take place. """ async with ops_test.fast_forward(): - await build_and_deploy(ops_test, len(UNIT_IDS), APP_NAME) + await build_and_deploy(ops_test, charm, len(UNIT_IDS), APP_NAME) for unit_id in UNIT_IDS: assert ops_test.model.applications[APP_NAME].units[unit_id].workload_status == "active" -@pytest.mark.group(1) async def test_application_created_required_resources(ops_test: OpsTest) -> None: # Compare the k8s resources that the charm and Patroni should create with # the currently created k8s resources. 
@@ -60,7 +59,6 @@ async def test_application_created_required_resources(ops_test: OpsTest) -> None assert set(existing_resources) == set(expected_resources) -@pytest.mark.group(1) @pytest.mark.parametrize("unit_id", UNIT_IDS) async def test_labels_consistency_across_pods(ops_test: OpsTest, unit_id: int) -> None: model = ops_test.model.info @@ -72,7 +70,6 @@ async def test_labels_consistency_across_pods(ops_test: OpsTest, unit_id: int) - assert pod.metadata.labels["cluster-name"] == f"patroni-{APP_NAME}" -@pytest.mark.group(1) @pytest.mark.parametrize("unit_id", UNIT_IDS) async def test_database_is_up(ops_test: OpsTest, unit_id: int): # Query Patroni REST API and check the status that indicates @@ -82,7 +79,6 @@ async def test_database_is_up(ops_test: OpsTest, unit_id: int): assert result.status_code == 200 -@pytest.mark.group(1) @pytest.mark.parametrize("unit_id", UNIT_IDS) async def test_exporter_is_up(ops_test: OpsTest, unit_id: int): # Query exporter metrics endpoint and check the status that indicates @@ -95,7 +91,6 @@ async def test_exporter_is_up(ops_test: OpsTest, unit_id: int): ) -@pytest.mark.group(1) @pytest.mark.parametrize("unit_id", UNIT_IDS) async def test_settings_are_correct(ops_test: OpsTest, unit_id: int): password = await get_password(ops_test) @@ -169,7 +164,6 @@ async def test_settings_are_correct(ops_test: OpsTest, unit_id: int): assert settings["postgresql"]["remove_data_directory_on_diverged_timelines"] is True -@pytest.mark.group(1) async def test_postgresql_parameters_change(ops_test: OpsTest) -> None: """Test that's possible to change PostgreSQL parameters.""" await ops_test.model.applications[APP_NAME].set_config({ @@ -213,7 +207,6 @@ async def test_postgresql_parameters_change(ops_test: OpsTest) -> None: connection.close() -@pytest.mark.group(1) async def test_cluster_is_stable_after_leader_deletion(ops_test: OpsTest) -> None: """Tests that the cluster maintains a primary after the primary is deleted.""" # Find the current primary unit. @@ -236,7 +229,6 @@ async def test_cluster_is_stable_after_leader_deletion(ops_test: OpsTest) -> Non assert await get_primary(ops_test, down_unit=primary) != "None" -@pytest.mark.group(1) async def test_scale_down_and_up(ops_test: OpsTest): """Test data is replicated to new units after a scale up.""" # Ensure the initial number of units in the application. 
@@ -262,7 +254,21 @@ async def test_scale_down_and_up(ops_test: OpsTest): await scale_application(ops_test, APP_NAME, initial_scale) -@pytest.mark.group(1) +async def test_switchover_sync_standby(ops_test: OpsTest): + original_roles = await get_cluster_roles( + ops_test, ops_test.model.applications[APP_NAME].units[0].name + ) + run_action = await ops_test.model.units[original_roles["sync_standbys"][0]].run_action( + "promote-to-primary", scope="unit" + ) + await run_action.wait() + await ops_test.model.wait_for_idle(status="active", timeout=200) + new_roles = await get_cluster_roles( + ops_test, ops_test.model.applications[APP_NAME].units[0].name + ) + assert new_roles["primaries"][0] == original_roles["sync_standbys"][0] + + async def test_persist_data_through_graceful_restart(ops_test: OpsTest): """Test data persists through a graceful restart.""" primary = await get_primary(ops_test) @@ -291,7 +297,6 @@ async def test_persist_data_through_graceful_restart(ops_test: OpsTest): connection.cursor().execute("SELECT * FROM gracetest;") -@pytest.mark.group(1) async def test_persist_data_through_failure(ops_test: OpsTest): """Test data persists through a failure.""" primary = await get_primary(ops_test) @@ -332,7 +337,6 @@ async def test_persist_data_through_failure(ops_test: OpsTest): connection.cursor().execute("SELECT * FROM failtest;") -@pytest.mark.group(1) async def test_automatic_failover_after_leader_issue(ops_test: OpsTest) -> None: """Tests that an automatic failover is triggered after an issue happens in the leader.""" # Find the current primary unit. @@ -350,7 +354,6 @@ async def test_automatic_failover_after_leader_issue(ops_test: OpsTest) -> None: assert await get_primary(ops_test) != "None" -@pytest.mark.group(1) async def test_application_removal(ops_test: OpsTest) -> None: # Remove the application to trigger some hooks (like peer relation departed). await ops_test.model.applications[APP_NAME].remove() @@ -376,10 +379,8 @@ async def test_application_removal(ops_test: OpsTest) -> None: assert APP_NAME not in ops_test.model.applications -@pytest.mark.group(1) -async def test_redeploy_charm_same_model(ops_test: OpsTest): +async def test_redeploy_charm_same_model(ops_test: OpsTest, charm): """Redeploy the charm in the same model to test that it works.""" - charm = await ops_test.build_charm(".") async with ops_test.fast_forward(): await ops_test.model.deploy( charm, @@ -399,8 +400,7 @@ async def test_redeploy_charm_same_model(ops_test: OpsTest): ) -@pytest.mark.group(1) -async def test_redeploy_charm_same_model_after_forcing_removal(ops_test: OpsTest) -> None: +async def test_redeploy_charm_same_model_after_forcing_removal(ops_test: OpsTest, charm) -> None: """Redeploy the charm in the same model to test that it works after a forceful removal.""" return_code, _, stderr = await ops_test.juju( "remove-application", APP_NAME, "--destroy-storage", "--force", "--no-prompt", "--no-wait" @@ -423,7 +423,6 @@ async def test_redeploy_charm_same_model_after_forcing_removal(ops_test: OpsTest assert set(existing_resources) == set(expected_resources) # Check that the charm can be deployed again. 
-    charm = await ops_test.build_charm(".")
     async with ops_test.fast_forward():
         await ops_test.model.deploy(
             charm,
@@ -447,13 +446,11 @@
     )
 
 
-@pytest.mark.group(1)
-async def test_storage_with_more_restrictive_permissions(ops_test: OpsTest):
+async def test_storage_with_more_restrictive_permissions(ops_test: OpsTest, charm):
     """Test that the charm can be deployed with a storage with more restrictive permissions."""
     app_name = f"test-storage-{APP_NAME}"
     async with ops_test.fast_forward():
         # Deploy and wait for the charm to get into the install hook (maintenance status).
-        charm = await ops_test.build_charm(".")
         async with ops_test.fast_forward():
             await ops_test.model.deploy(
                 charm,
diff --git a/tests/integration/test_config.py b/tests/integration/test_config.py
index 8a8aea6156..229450b715 100644
--- a/tests/integration/test_config.py
+++ b/tests/integration/test_config.py
@@ -16,13 +16,12 @@
 
 logger = logging.getLogger(__name__)
 
 
-@pytest.mark.group(1)
 @pytest.mark.abort_on_fail
-async def test_config_parameters(ops_test: OpsTest) -> None:
+async def test_config_parameters(ops_test: OpsTest, charm) -> None:
     """Build and deploy one unit of PostgreSQL and then test config with wrong parameters."""
     # Build and deploy the PostgreSQL charm.
     async with ops_test.fast_forward():
-        await build_and_deploy(ops_test, 1)
+        await build_and_deploy(ops_test, charm, 1)
 
     await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active")
 
@@ -30,18 +29,32 @@
     test_string = "abcXYZ123"
 
     configs = [
+        {"synchronous_node_count": ["0", "1"]},  # config option is greater than 0
+        {
+            "synchronous_node_count": [test_string, "all"]
+        },  # config option is one of `all`, `minority` or `majority`
+        {"connection_authentication_timeout": ["0", "60"]},  # config option is between 1 and 600
+        {"connection_statement_timeout": ["-1", "0"]},  # config option is between 0 and 2147483647
         {
             "durability_synchronous_commit": [test_string, "on"]
         },  # config option is one of `on`, `remote_apply` or `remote_write`
+        {
+            "instance_default_text_search_config": [test_string, "pg_catalog.simple"]
+        },  # config option is validated against the db
         {
             "instance_max_locks_per_transaction": ["-1", "64"]
         },  # config option is between 64 and 2147483647
         {
             "instance_password_encryption": [test_string, "scram-sha-256"]
         },  # config option is one of `md5` or `scram-sha-256`
+        {"logging_client_min_messages": [test_string, "notice"]},
+        # config option is one of 'debug5', 'debug4', 'debug3', 'debug2', 'debug1', 'log', 'notice', 'warning' or 'error'.
         {
             "logging_log_min_duration_statement": ["-2", "-1"]
         },  # config option is between -1 and 2147483647
+        {
+            "logging_track_functions": [test_string, "none"]
+        },  # config option is one of 'none', 'pl', 'all'.
         {
             "memory_maintenance_work_mem": ["1023", "65536"]
         },  # config option is between 1024 and 2147483647
@@ -52,21 +65,101 @@
         {
             "optimizer_constraint_exclusion": [test_string, "partition"]
         },  # config option is one of `on`, `off` or `partition`
+        {
+            "optimizer_cpu_index_tuple_cost": ["-1", "0.005"]
+        },  # config option is between 0 and 1.80E+308
+        {
+            "optimizer_cpu_operator_cost": ["-1", "0.0025"]
+        },  # config option is between 0 and 1.80E+308
+        {"optimizer_cpu_tuple_cost": ["-1", "0.01"]},  # config option is between 0 and 1.80E+308
+        {"optimizer_cursor_tuple_fraction": ["-1", "0.1"]},  # config option is between 0 and 1
         {
             "optimizer_default_statistics_target": ["0", "100"]
         },  # config option is between 1 and 10000
         {"optimizer_from_collapse_limit": ["0", "8"]},  # config option is between 1 and 2147483647
+        {"optimizer_geqo_effort": ["-1", "5"]},  # config option is between 1 and 10
+        {"optimizer_geqo_generations": ["-1", "0"]},  # config option is between 0 and 2147483647
+        {"optimizer_geqo_pool_size": ["-1", "0"]},  # config option is between 0 and 2147483647
+        {"optimizer_geqo_seed": ["-1", "0.0"]},  # config option is between 0 and 1
+        {"optimizer_geqo_selection_bias": ["-1", "2.0"]},  # config option is between 1 and 2
+        {"optimizer_geqo_threshold": ["-1", "12"]},  # config option is between 1 and 2147483647
+        {
+            "optimizer_jit_above_cost": ["-2", "100000.0"]
+        },  # config option is between -1 and 1.80E+308
+        {
+            "optimizer_jit_inline_above_cost": ["-2", "500000.0"]
+        },  # config option is between -1 and 1.80E+308
+        {
+            "optimizer_jit_optimize_above_cost": ["-2", "500000.0"]
+        },  # config option is between -1 and 1.80E+308
         {"optimizer_join_collapse_limit": ["0", "8"]},  # config option is between 1 and 2147483647
+        {
+            "optimizer_min_parallel_index_scan_size": ["-1", "64"]
+        },  # config option is between 0 and 715827882
+        {
+            "optimizer_min_parallel_table_scan_size": ["-1", "1024"]
+        },  # config option is between 0 and 715827882
+        {
+            "optimizer_parallel_setup_cost": ["-1", "1000.0"]
+        },  # config option is between 0 and 1.80E+308
+        {
+            "optimizer_parallel_tuple_cost": ["-1", "0.1"]
+        },  # config option is between 0 and 1.80E+308
         {"profile": [test_string, "testing"]},  # config option is one of `testing` or `production`
-        # {"profile_limit_memory": {"127", "128"}},  # config option is between 128 and 9999999
+        # {
+        #     "profile_limit_memory": {"127", "128"}
+        # },  # config option is between 128 and 9999999
+        {
+            "request_backslash_quote": [test_string, "safe_encoding"]
+        },  # config option is one of `safe_encoding`, `on` or `off`
+        {
+            "request_date_style": [test_string, "ISO, MDY"]
+        },  # config option is validated against the db
+        {"request_deadlock_timeout": ["-1", "1000"]},  # config option is between 1 and 2147483647
+        {
+            "request_default_transaction_isolation": [test_string, "read committed"]
+        },  # config option is one of `serializable`, `repeatable read`, `read committed`, `read uncommitted`.
+        {"request_lock_timeout": ["-1", "0"]},  # config option is between 0 and 2147483647
+        {"request_time_zone": [test_string, "UTC"]},  # config option is validated against the db
+        {
+            "request_track_activity_query_size": ["-1", "1024"]
+        },  # config option is between 100 and 1048576
+        {"request_xmlbinary": [test_string, "base64"]},  # config option is one of `base64`, `hex`.
+        {
+            "request_xmloption": [test_string, "content"]
+        },  # config option is one of `content`, `document`.
{ "response_bytea_output": [test_string, "hex"] }, # config option is one of `escape` or `hex` + {"response_extra_float_digits": ["5", "1"]}, # config option is between -15 and 3 { - "vacuum_autovacuum_analyze_scale_factor": ["-1", "0.1"] - }, # config option is between 0 and 100 + "response_gin_fuzzy_search_limit": ["-1", "0"] + }, # config option is between 0 and 2147483647 { - "vacuum_autovacuum_vacuum_scale_factor": ["-1", "0.2"] + "response_lc_monetary": [test_string, "C"] + }, # allowed values are the locales available in the unit. + { + "response_lc_numeric": [test_string, "C"] + }, # allowed values are the locales available in the unit. + { + "response_lc_time": [test_string, "C"] + }, # allowed values are the locales available in the unit. + { + "session_idle_in_transaction_session_timeout": ["-1", "0"] + }, # config option is between 0 and 2147483647 + { + "storage_bgwriter_lru_maxpages": ["-1", "100"] + }, # config option is between 0 and 1073741823 + {"storage_bgwriter_lru_multiplier": ["-1", "2.0"]}, # config option is between 0 and 10 + { + "storage_default_table_access_method": [test_string, "heap"] + }, # config option entries can be created using the CREATE ACCESS METHOD SQL command. default `heap` + { + "storage_gin_pending_list_limit": ["-1", "4096"] + }, # config option is between 64 and 2147483647 + {"storage_old_snapshot_threshold": ["-2", "-1"]}, # config option is between -1 and 86400 + { + "vacuum_autovacuum_analyze_scale_factor": ["-1", "0.1"] }, # config option is between 0 and 100 { "vacuum_autovacuum_analyze_threshold": ["-1", "50"] @@ -74,19 +167,48 @@ async def test_config_parameters(ops_test: OpsTest) -> None: { "vacuum_autovacuum_freeze_max_age": ["99999", "200000000"] }, # config option is between 100000 and 2000000000 + {"vacuum_autovacuum_naptime": ["-1", "60"]}, # config option is between 1 and 2147483 { "vacuum_autovacuum_vacuum_cost_delay": ["-2", "2.0"] }, # config option is between -1 and 100 + { + "vacuum_autovacuum_vacuum_cost_limit": ["-2", "-1"] + }, # config option is between -1 and 10000 + { + "vacuum_autovacuum_vacuum_insert_scale_factor": ["-1", "0.2"] + }, # config option is between 0 and 100 + { + "vacuum_autovacuum_vacuum_insert_threshold": ["-2", "1000"] + }, # config option is between -1 and 2147483647 + { + "vacuum_autovacuum_vacuum_scale_factor": ["-1", "0.2"] + }, # config option is between 0 and 100 + { + "vacuum_autovacuum_vacuum_threshold": ["-1", "50"] + }, # config option is between 0 and 2147483647 + {"vacuum_vacuum_cost_delay": ["-1", "0.0"]}, # config option is between 0 and 100 + {"vacuum_vacuum_cost_limit": ["-1", "200"]}, # config option is between 1 and 10000 + {"vacuum_vacuum_cost_page_dirty": ["-1", "20"]}, # config option is between 0 and 10000 + {"vacuum_vacuum_cost_page_hit": ["-1", "1"]}, # config option is between 0 and 10000 + {"vacuum_vacuum_cost_page_miss": ["-1", "2"]}, # config option is between 0 and 10000 + { + "vacuum_vacuum_failsafe_age": ["-1", "1600000000"] + }, # config option is between 0 and 2100000000 + { + "vacuum_vacuum_freeze_min_age": ["-1", "50000000"] + }, # config option is between 0 and 1000000000 { "vacuum_vacuum_freeze_table_age": ["-1", "150000000"] }, # config option is between 0 and 2000000000 { - "instance_default_text_search_config": [test_string, "pg_catalog.simple"] - }, # config option is validated against the db + "vacuum_vacuum_multixact_failsafe_age": ["-1", "1600000000"] + }, # config option is between 0 and 2100000000 { - "request_date_style": [test_string, "ISO, MDY"] - }, # 
config option is validated against the db - {"request_time_zone": [test_string, "UTC"]}, # config option is validated against the db + "vacuum_vacuum_multixact_freeze_min_age": ["-1", "5000000"] + }, # config option is between 0 and 1000000000 + { + "vacuum_vacuum_multixact_freeze_table_age": ["-1", "150000000"] + }, # config option is between 0 and 2000000000 ] charm_config = {} diff --git a/tests/integration/test_db.py b/tests/integration/test_db.py deleted file mode 100644 index 658aa6b713..0000000000 --- a/tests/integration/test_db.py +++ /dev/null @@ -1,249 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. -import logging -from asyncio import gather - -import pytest -from pytest_operator.plugin import OpsTest - -from . import markers -from .helpers import ( - APPLICATION_NAME, - CHARM_BASE, - DATABASE_APP_NAME, - build_and_deploy, - check_database_creation, - check_database_users_existence, - deploy_and_relate_application_with_postgresql, - get_leader_unit, - wait_for_relation_removed_between, -) - -EXTENSIONS_BLOCKING_MESSAGE = "extensions requested through relation" -FINOS_WALTZ_APP_NAME = "finos-waltz" -ANOTHER_FINOS_WALTZ_APP_NAME = "another-finos-waltz" -APPLICATION_UNITS = 1 -DATABASE_UNITS = 3 -ROLES_BLOCKING_MESSAGE = ( - "roles requested through relation, use postgresql_client interface instead" -) - -logger = logging.getLogger(__name__) - - -@pytest.mark.group(1) -@markers.amd64_only # finos-waltz-k8s charm not available for arm64 -async def test_finos_waltz_db(ops_test: OpsTest) -> None: - """Deploy Finos Waltz to test the 'db' relation. - - Args: - ops_test: The ops test framework - """ - async with ops_test.fast_forward(): - # Build and deploy the PostgreSQL charm. - await build_and_deploy(ops_test, DATABASE_UNITS) - - assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == DATABASE_UNITS - - for unit in ops_test.model.applications[DATABASE_APP_NAME].units: - assert unit.workload_status == "active" - - # Deploy and test the first deployment of Finos Waltz. - relation_id = await deploy_and_relate_application_with_postgresql( - ops_test, - "finos-waltz-k8s", - FINOS_WALTZ_APP_NAME, - APPLICATION_UNITS, - channel="edge", - base="ubuntu@20.04", - ) - await check_database_creation(ops_test, "waltz") - - finos_waltz_users = [f"relation_id_{relation_id}"] - - await check_database_users_existence(ops_test, finos_waltz_users, []) - - # Deploy and test another deployment of Finos Waltz. - another_relation_id = await deploy_and_relate_application_with_postgresql( - ops_test, - "finos-waltz-k8s", - ANOTHER_FINOS_WALTZ_APP_NAME, - APPLICATION_UNITS, - channel="edge", - base="ubuntu@20.04", - ) - # In this case, the database name is the same as in the first deployment - # because it's a fixed value in Finos Waltz charm. 
- await check_database_creation(ops_test, "waltz") - - another_finos_waltz_users = [f"relation_id_{another_relation_id}"] - - await check_database_users_existence( - ops_test, finos_waltz_users + another_finos_waltz_users, [] - ) - - # Remove second relation and validate that related users were deleted - await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( - f"{DATABASE_APP_NAME}:db", f"{ANOTHER_FINOS_WALTZ_APP_NAME}" - ) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=1000) - await check_database_users_existence( - ops_test, finos_waltz_users, another_finos_waltz_users - ) - - # Remove first relation and validate that related users were deleted - await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( - f"{DATABASE_APP_NAME}:db", f"{FINOS_WALTZ_APP_NAME}" - ) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=1000) - await check_database_users_existence(ops_test, [], finos_waltz_users) - - # Remove the first and second deployment of Finos Waltz. - await ops_test.model.remove_application(FINOS_WALTZ_APP_NAME, block_until_done=True) - await ops_test.model.remove_application( - ANOTHER_FINOS_WALTZ_APP_NAME, block_until_done=True - ) - - -@pytest.mark.group(1) -@markers.amd64_only # finos-waltz-k8s charm not available for arm64 -# (and this test depends on previous test with finos-waltz-k8s charm) -async def test_extensions_blocking(ops_test: OpsTest) -> None: - await ops_test.model.deploy( - APPLICATION_NAME, - application_name=APPLICATION_NAME, - base=CHARM_BASE, - channel="edge", - ) - await ops_test.model.deploy( - APPLICATION_NAME, - application_name=f"{APPLICATION_NAME}2", - base=CHARM_BASE, - channel="edge", - ) - - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME, f"{APPLICATION_NAME}2"], - status="active", - timeout=1000, - ) - - await gather( - ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db"), - ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}2:db"), - ) - - leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) - await ops_test.model.block_until( - lambda: leader_unit.workload_status_message == EXTENSIONS_BLOCKING_MESSAGE, timeout=1000 - ) - - assert leader_unit.workload_status_message == EXTENSIONS_BLOCKING_MESSAGE - - logger.info("Verify that the charm remains blocked if there are other blocking relations") - await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( - f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" - ) - - await ops_test.model.block_until( - lambda: leader_unit.workload_status_message == EXTENSIONS_BLOCKING_MESSAGE, timeout=1000 - ) - - assert leader_unit.workload_status_message == EXTENSIONS_BLOCKING_MESSAGE - - logger.info("Verify that active status is restored when all blocking relations are gone") - await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( - f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}2:db" - ) - - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME], - status="active", - timeout=1000, - ) - - logger.info("Verifying that the charm doesn't block when the extensions are enabled") - config = {"plugin_pg_trgm_enable": "True", "plugin_unaccent_enable": "True"} - await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active") - await ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db") - await 
ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - timeout=2000, - ) - - logger.info("Verifying that the charm unblocks when the extensions are enabled") - config = {"plugin_pg_trgm_enable": "False", "plugin_unaccent_enable": "False"} - await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) - await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( - f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" - ) - wait_for_relation_removed_between(ops_test, DATABASE_APP_NAME, APPLICATION_NAME) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME, APPLICATION_NAME], status="active") - - await ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db") - await ops_test.model.block_until( - lambda: leader_unit.workload_status_message == EXTENSIONS_BLOCKING_MESSAGE, timeout=1000 - ) - - config = {"plugin_pg_trgm_enable": "True", "plugin_unaccent_enable": "True"} - await ops_test.model.applications[DATABASE_APP_NAME].set_config(config) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME], - status="active", - raise_on_blocked=False, - timeout=2000, - ) - # removing relation to test roles - await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( - f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" - ) - - -@pytest.mark.group(1) -@markers.amd64_only # finos-waltz-k8s charm not available for arm64 -# (and this test depends on a previous test with finos-waltz-k8s charm) -async def test_roles_blocking(ops_test: OpsTest) -> None: - config = {"legacy_roles": "true"} - await ops_test.model.applications[APPLICATION_NAME].set_config(config) - await ops_test.model.applications[f"{APPLICATION_NAME}2"].set_config(config) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, APPLICATION_NAME, f"{APPLICATION_NAME}2"], - status="active", - ) - - await gather( - ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db"), - ops_test.model.relate(f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}2:db"), - ) - - leader_unit = await get_leader_unit(ops_test, DATABASE_APP_NAME) - await ops_test.model.block_until( - lambda: leader_unit.workload_status_message == ROLES_BLOCKING_MESSAGE, timeout=1000 - ) - - assert leader_unit.workload_status_message == ROLES_BLOCKING_MESSAGE - - logger.info("Verify that the charm remains blocked if there are other blocking relations") - await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( - f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}:db" - ) - - await ops_test.model.block_until( - lambda: leader_unit.workload_status_message == ROLES_BLOCKING_MESSAGE, timeout=1000 - ) - - assert leader_unit.workload_status_message == ROLES_BLOCKING_MESSAGE - - logger.info("Verify that active status is restored when all blocking relations are gone") - await ops_test.model.applications[DATABASE_APP_NAME].destroy_relation( - f"{DATABASE_APP_NAME}:db", f"{APPLICATION_NAME}2:db" - ) - - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME], - status="active", - timeout=1000, - ) diff --git a/tests/integration/test_db_admin.py b/tests/integration/test_db_admin.py deleted file mode 100644 index 8d1a5c94fa..0000000000 --- a/tests/integration/test_db_admin.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2022 Canonical Ltd. -# See LICENSE file for licensing details. -import asyncio - -import pytest as pytest -from pytest_operator.plugin import OpsTest - -from . 
import markers -from .helpers import ( - DATABASE_APP_NAME, - build_and_deploy, - check_database_creation, - check_database_users_existence, - get_unit_address, -) - -DISCOURSE_APP_NAME = "discourse-charmers-discourse-k8s" -REDIS_APP_NAME = "redis-k8s" -APPLICATION_UNITS = 1 -DATABASE_UNITS = 3 - - -@pytest.mark.group(1) -@markers.amd64_only # discourse-charmers-discourse-k8s charm contains amd64-only binaries (pyyaml) -@pytest.mark.abort_on_fail -async def test_discourse_from_discourse_charmers(ops_test: OpsTest): - # Build and deploy charm from local source folder (and also redis from Charmhub). - # Both are needed by Discourse. - async with ops_test.fast_forward(): - await asyncio.gather( - build_and_deploy(ops_test, DATABASE_UNITS), - ops_test.model.deploy( - REDIS_APP_NAME, application_name=REDIS_APP_NAME, base="ubuntu@20.04" - ), - ) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, REDIS_APP_NAME], status="active", timeout=1500 - ) - - # Get the Redis instance IP address. - redis_host = await get_unit_address(ops_test, f"{REDIS_APP_NAME}/0") - - # Deploy Discourse and wait for it to be blocked waiting for database relation. - await ops_test.model.deploy( - DISCOURSE_APP_NAME, - application_name=DISCOURSE_APP_NAME, - config={ - "redis_host": redis_host, - "developer_emails": "user@foo.internal", - "external_hostname": "foo.internal", - "smtp_address": "127.0.0.1", - "smtp_domain": "foo.internal", - }, - ) - # Discourse becomes blocked waiting for PostgreSQL relation. - await ops_test.model.wait_for_idle(apps=[DISCOURSE_APP_NAME], status="blocked", timeout=1000) - - # Relate PostgreSQL and Discourse, waiting for Discourse to be ready. - relation = await ops_test.model.add_relation( - f"{DATABASE_APP_NAME}:db-admin", - DISCOURSE_APP_NAME, - ) - await ops_test.model.wait_for_idle( - apps=[DATABASE_APP_NAME, DISCOURSE_APP_NAME, REDIS_APP_NAME], - status="active", - timeout=2000, # Discourse takes a longer time to become active (a lot of setup). - ) - - # Check for the correct databases and users creation. - await check_database_creation(ops_test, "discourse-charmers-discourse-k8s") - discourse_users = [f"relation_id_{relation.id}"] - await check_database_users_existence(ops_test, discourse_users, [], admin=True) - - # Remove Discourse relation and validate that related users were deleted - await ops_test.model.applications[DATABASE_APP_NAME].remove_relation( - f"{DATABASE_APP_NAME}:db-admin", f"{DISCOURSE_APP_NAME}" - ) - await ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=1000) - await check_database_users_existence(ops_test, [], discourse_users) - - # Remove the deployment of Discourse. 
- await ops_test.model.remove_application(DISCOURSE_APP_NAME, block_until_done=True) diff --git a/tests/integration/test_password_rotation.py b/tests/integration/test_password_rotation.py index 3bf0db0a52..62f6422ea7 100644 --- a/tests/integration/test_password_rotation.py +++ b/tests/integration/test_password_rotation.py @@ -27,16 +27,14 @@ APP_NAME = METADATA["name"] -@pytest.mark.group(1) @pytest.mark.abort_on_fail @pytest.mark.skip_if_deployed -async def test_deploy_active(ops_test: OpsTest): +async def test_deploy_active(ops_test: OpsTest, charm): """Build the charm and deploy it.""" async with ops_test.fast_forward(): - await build_and_deploy(ops_test, 3, database_app_name=APP_NAME) + await build_and_deploy(ops_test, charm, 3, database_app_name=APP_NAME) -@pytest.mark.group(1) async def test_password_rotation(ops_test: OpsTest): """Test password rotation action.""" # Get the initial passwords set for the system users. @@ -108,7 +106,6 @@ async def test_password_rotation(ops_test: OpsTest): assert await check_patroni(ops_test, unit.name, restart_time) -@pytest.mark.group(1) @markers.juju_secrets async def test_password_from_secret_same_as_cli(ops_test: OpsTest): """Checking if password is same as returned by CLI. @@ -133,7 +130,6 @@ async def test_password_from_secret_same_as_cli(ops_test: OpsTest): assert data[secret_id]["content"]["Data"]["replication-password"] == password -@pytest.mark.group(1) async def test_empty_password(ops_test: OpsTest) -> None: """Test that the password can't be set to an empty string.""" leader_unit = await get_leader_unit(ops_test, APP_NAME) @@ -146,7 +142,6 @@ async def test_empty_password(ops_test: OpsTest) -> None: assert password == "None" -@pytest.mark.group(1) async def test_db_connection_with_empty_password(ops_test: OpsTest): """Test that user can't connect with empty password.""" primary = await get_primary(ops_test) @@ -155,7 +150,6 @@ async def test_db_connection_with_empty_password(ops_test: OpsTest): connection.close() -@pytest.mark.group(1) async def test_no_password_change_on_invalid_password(ops_test: OpsTest) -> None: """Test that in general, there is no change when password validation fails.""" leader_unit = await get_leader_unit(ops_test, APP_NAME) @@ -168,7 +162,6 @@ async def test_no_password_change_on_invalid_password(ops_test: OpsTest) -> None assert password1 == password2 -@pytest.mark.group(1) async def test_no_password_exposed_on_logs(ops_test: OpsTest) -> None: """Test that passwords don't get exposed on postgresql logs.""" for unit in ops_test.model.applications[APP_NAME].units: diff --git a/tests/integration/test_plugins.py b/tests/integration/test_plugins.py index a628f28915..ef4e796eff 100644 --- a/tests/integration/test_plugins.py +++ b/tests/integration/test_plugins.py @@ -62,8 +62,10 @@ HYPOPG_EXTENSION_STATEMENT = "CREATE TABLE hypopg_test (id integer, val text); SELECT hypopg_create_index('CREATE INDEX ON hypopg_test (id)');" IP4R_EXTENSION_STATEMENT = "CREATE TABLE ip4r_test (ip ip4);" JSONB_PLPERL_EXTENSION_STATEMENT = "CREATE OR REPLACE FUNCTION jsonb_plperl_test(val jsonb) RETURNS jsonb TRANSFORM FOR TYPE jsonb LANGUAGE plperl as $$ return $_[0]; $$;" -ORAFCE_EXTENSION_STATEMENT = "SELECT add_months(date '2005-05-31',1);" -PG_SIMILARITY_EXTENSION_STATEMENT = "SHOW pg_similarity.levenshtein_threshold;" +ORAFCE_EXTENSION_STATEMENT = "SELECT oracle.add_months(date '2005-05-31',1);" +PG_SIMILARITY_EXTENSION_STATEMENT = ( + "SET pg_similarity.levenshtein_threshold = 0.7; SELECT 'aaa', 'aab', lev('aaa','aab');" +) 
PLPERL_EXTENSION_STATEMENT = "CREATE OR REPLACE FUNCTION plperl_test(name text) RETURNS text AS $$ return $_SHARED{$_[0]}; $$ LANGUAGE plperl;" PREFIX_EXTENSION_STATEMENT = "SELECT '123'::prefix_range @> '123456';" RDKIT_EXTENSION_STATEMENT = "SELECT is_valid_smiles('CCC');" @@ -88,13 +90,13 @@ TIMESCALEDB_EXTENSION_STATEMENT = "CREATE TABLE test_timescaledb (time TIMESTAMPTZ NOT NULL); SELECT create_hypertable('test_timescaledb', 'time');" -@pytest.mark.group(1) @pytest.mark.abort_on_fail -async def test_plugins(ops_test: OpsTest) -> None: +async def test_plugins(ops_test: OpsTest, charm) -> None: """Build and deploy one unit of PostgreSQL and then test the available plugins.""" # Build and deploy the PostgreSQL charm. async with ops_test.fast_forward(): - await build_and_deploy(ops_test, 2) + # TODO Figure out how to deal with pgaudit + await build_and_deploy(ops_test, charm, 2, extra_config={"plugin_audit_enable": "False"}) sql_tests = { "plugin_citext_enable": CITEXT_EXTENSION_STATEMENT, @@ -203,7 +205,6 @@ def enable_disable_config(enabled: False): connection.close() -@pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_plugin_objects(ops_test: OpsTest) -> None: """Checks if charm gets blocked when trying to disable a plugin in use.""" diff --git a/tests/integration/test_storage.py b/tests/integration/test_storage.py index fe936685aa..3c4ade4203 100644 --- a/tests/integration/test_storage.py +++ b/tests/integration/test_storage.py @@ -7,6 +7,7 @@ import pytest from pytest_operator.plugin import OpsTest +from . import markers from .helpers import ( DATABASE_APP_NAME, STORAGE_PATH, @@ -17,17 +18,16 @@ logger = logging.getLogger(__name__) -MAX_RETRIES = 20 INSUFFICIENT_SIZE_WARNING = "<10% free space on pgdata volume." -@pytest.mark.group(1) +@markers.amd64_only @pytest.mark.abort_on_fail -async def test_filling_and_emptying_pgdata_storage(ops_test: OpsTest): +async def test_filling_and_emptying_pgdata_storage(ops_test: OpsTest, charm): """Build and deploy the charm and saturate its pgdata volume.""" # Build and deploy the PostgreSQL charm. async with ops_test.fast_forward(): - await build_and_deploy(ops_test, 1) + await build_and_deploy(ops_test, charm, 1) # Saturate pgdata storage with random data primary = await get_primary(ops_test, DATABASE_APP_NAME) diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py index 71a04eaf06..2b6a0f0fd9 100644 --- a/tests/integration/test_tls.py +++ b/tests/integration/test_tls.py @@ -8,7 +8,7 @@ from pytest_operator.plugin import OpsTest from tenacity import Retrying, stop_after_delay, wait_fixed -from . import architecture, markers +from . 
import architecture from .ha_tests.helpers import ( change_patroni_setting, ) @@ -16,13 +16,10 @@ CHARM_BASE, DATABASE_APP_NAME, build_and_deploy, - check_database_creation, - check_database_users_existence, check_tls, check_tls_patroni_api, check_tls_replication, db_connect, - deploy_and_relate_application_with_postgresql, get_password, get_primary, get_unit_address, @@ -33,24 +30,17 @@ logger = logging.getLogger(__name__) -MATTERMOST_APP_NAME = "mattermost" -if juju_major_version < 3: - tls_certificates_app_name = "tls-certificates-operator" - tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable" - tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"} -else: - tls_certificates_app_name = "self-signed-certificates" - tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" - tls_config = {"ca-common-name": "Test CA"} +tls_certificates_app_name = "self-signed-certificates" +tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable" +tls_config = {"ca-common-name": "Test CA"} APPLICATION_UNITS = 2 DATABASE_UNITS = 3 -@pytest.mark.group(1) @pytest.mark.abort_on_fail -async def test_build_and_deploy(ops_test: OpsTest) -> None: +async def test_build_and_deploy(ops_test: OpsTest, charm) -> None: """Build and deploy three units of PostgreSQL.""" - await build_and_deploy(ops_test, DATABASE_UNITS, wait_for_idle=False) + await build_and_deploy(ops_test, charm, DATABASE_UNITS, wait_for_idle=False) async def check_tls_rewind(ops_test: OpsTest) -> None: @@ -72,7 +62,6 @@ async def check_tls_rewind(ops_test: OpsTest) -> None: ) -@pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_tls(ops_test: OpsTest) -> None: async with ops_test.fast_forward(): @@ -99,7 +88,7 @@ async def test_tls(ops_test: OpsTest) -> None: patroni_password = await get_password(ops_test, "patroni") cluster_info = requests.get(f"https://{primary_address}:8008/cluster", verify=False) for member in cluster_info.json()["members"]: - if member["role"] == "replica": + if member["role"] != "leader": replica = "/".join(member["name"].rsplit("-", 1)) # Check if TLS enabled for replication @@ -123,7 +112,7 @@ async def test_tls(ops_test: OpsTest) -> None: await run_command_on_unit( ops_test, replica, - 'su postgres -c "/usr/lib/postgresql/14/bin/pg_ctl -D /var/lib/postgresql/data/pgdata promote"', + 'su postgres -c "/usr/lib/postgresql/16/bin/pg_ctl -D /var/lib/postgresql/data/pgdata promote"', ) # Check that the replica was promoted. @@ -173,34 +162,6 @@ async def test_tls(ops_test: OpsTest) -> None: ) -@pytest.mark.group(1) -@markers.amd64_only # mattermost-k8s charm not available for arm64 -async def test_mattermost_db(ops_test: OpsTest) -> None: - """Deploy Mattermost to test the 'db' relation. - - Mattermost needs TLS enabled on PostgreSQL to correctly connect to it. - - Args: - ops_test: The ops test framework - """ - async with ops_test.fast_forward(): - # Deploy and check Mattermost user and database existence. 
- relation_id = await deploy_and_relate_application_with_postgresql( - ops_test, - "mattermost-k8s", - MATTERMOST_APP_NAME, - APPLICATION_UNITS, - status="waiting", - base="ubuntu@20.04", - ) - await check_database_creation(ops_test, "mattermost") - - mattermost_users = [f"relation_id_{relation_id}"] - - await check_database_users_existence(ops_test, mattermost_users, []) - - -@pytest.mark.group(1) async def test_remove_tls(ops_test: OpsTest) -> None: async with ops_test.fast_forward(): # Remove the relation. diff --git a/tests/integration/test_trust.py b/tests/integration/test_trust.py index 8de2491ed8..fe41f85c06 100644 --- a/tests/integration/test_trust.py +++ b/tests/integration/test_trust.py @@ -2,16 +2,13 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. -import asyncio import logging -import time import pytest from pytest_operator.plugin import OpsTest from .helpers import ( CHARM_BASE, - KUBECTL, METADATA, get_leader_unit, ) @@ -19,78 +16,17 @@ logger = logging.getLogger(__name__) APP_NAME = "untrusted-postgresql-k8s" -MAX_RETRIES = 20 UNTRUST_ERROR_MESSAGE = f"Insufficient permissions, try: `juju trust {APP_NAME} --scope=cluster`" -@pytest.mark.group(1) -async def test_enable_rbac(ops_test: OpsTest): - """Enables RBAC from inside test runner's environment. - - Assert on permission enforcement being active. - """ - enable_rbac_call = await asyncio.create_subprocess_exec( - "sudo", - "microk8s", - "enable", - "rbac", - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - await enable_rbac_call.communicate() - - is_default_auth = None - retries = 0 - while is_default_auth != "no" and retries < MAX_RETRIES: - rbac_check = await asyncio.create_subprocess_exec( - *KUBECTL.split(), - "auth", - "can-i", - "get", - "cm", - "-A", - "--as=system:serviceaccount:default:no-permissions", - stdout=asyncio.subprocess.PIPE, - stderr=asyncio.subprocess.PIPE, - ) - stdout, _ = await rbac_check.communicate() - if stdout: - is_default_auth = stdout.decode().split()[0] - logger.info(f"Response from rbac check ('no' means enabled): {is_default_auth}") - retries += 1 - - assert is_default_auth == "no" - - -@pytest.mark.group(1) -async def test_model_connectivity(ops_test: OpsTest): - """Tries to regain connectivity to model after microK8s restart.""" - retries = 0 - while retries < MAX_RETRIES: - try: - await ops_test.model.connect_current() - status = await ops_test.model.get_status() - logger.info(f"Connection established: {status}") - return - except Exception as e: - logger.info(f"Connection attempt failed: {e}") - retries += 1 - logger.info(f"Retrying ({retries}/{MAX_RETRIES})...") - time.sleep(3) - - logger.error(f"Max retries number of {MAX_RETRIES} reached. Unable to connect.") - assert False - - -@pytest.mark.group(1) @pytest.mark.abort_on_fail -async def test_deploy_without_trust(ops_test: OpsTest, database_charm): +async def test_deploy_without_trust(ops_test: OpsTest, charm): """Build and deploy the charm with trust set to false. Assert on the unit status being blocked due to lack of trust. """ await ops_test.model.deploy( - database_charm, + charm, resources={ "postgresql-image": METADATA["resources"]["postgresql-image"]["upstream-source"] }, @@ -113,7 +49,6 @@ async def test_deploy_without_trust(ops_test: OpsTest, database_charm): assert leader_unit.workload_status_message == UNTRUST_ERROR_MESSAGE -@pytest.mark.group(1) async def test_trust_blocked_deployment(ops_test: OpsTest): """Trust existing blocked deployment. 
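An aside on the trust flow above: test_deploy_without_trust intentionally leaves the unit blocked with UNTRUST_ERROR_MESSAGE, and test_trust_blocked_deployment (whose body sits outside this hunk's context) then clears it by granting cluster-scoped trust. A minimal sketch of that unblocking step, assuming pytest-operator's ops_test.juju() CLI wrapper; the command text comes straight from the error message defined in test_trust.py: # Hedged sketch (not part of this diff): grant the trust the blocked unit asks for, then wait for the charm to settle. async def grant_trust_and_settle(ops_test, app_name: str) -> None: await ops_test.juju("trust", app_name, "--scope=cluster") await ops_test.model.wait_for_idle(apps=[app_name], status="active", timeout=1000)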
diff --git a/tests/integration/test_wrong_arch.py b/tests/integration/test_wrong_arch.py index 4b9980ca3d..83e81c6cf4 100644 --- a/tests/integration/test_wrong_arch.py +++ b/tests/integration/test_wrong_arch.py @@ -7,7 +7,6 @@ import pathlib import typing -import pytest from pytest_operator.plugin import OpsTest from . import markers @@ -26,7 +25,6 @@ async def fetch_charm( return packed_charms[0].resolve(strict=True) -@pytest.mark.group(1) @markers.amd64_only async def test_arm_charm_on_amd_host(ops_test: OpsTest) -> None: """Tries deploying an arm64 charm on amd64 host.""" @@ -49,7 +47,6 @@ async def test_arm_charm_on_amd_host(ops_test: OpsTest) -> None: ) -@pytest.mark.group(1) @markers.arm64_only async def test_amd_charm_on_arm_host(ops_test: OpsTest) -> None: """Tries deploying an amd64 charm on arm64 host.""" diff --git a/tests/spread/test_async_replication.py/task.yaml b/tests/spread/test_async_replication.py/task.yaml new file mode 100644 index 0000000000..fc10167323 --- /dev/null +++ b/tests/spread/test_async_replication.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_async_replication.py +environment: + TEST_MODULE: ha_tests/test_async_replication.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_audit.py/task.yaml b/tests/spread/test_audit.py/task.yaml new file mode 100644 index 0000000000..9cbc84e43d --- /dev/null +++ b/tests/spread/test_audit.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_audit.py +environment: + TEST_MODULE: test_audit.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_backups_aws.py/task.yaml b/tests/spread/test_backups_aws.py/task.yaml new file mode 100644 index 0000000000..c7eb541232 --- /dev/null +++ b/tests/spread/test_backups_aws.py/task.yaml @@ -0,0 +1,9 @@ +summary: test_backups_aws.py +environment: + TEST_MODULE: test_backups_aws.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results +backends: + - -lxd-vm # Requires CI secrets diff --git a/tests/spread/test_backups_gcp.py/task.yaml b/tests/spread/test_backups_gcp.py/task.yaml new file mode 100644 index 0000000000..c0dc3ac976 --- /dev/null +++ b/tests/spread/test_backups_gcp.py/task.yaml @@ -0,0 +1,9 @@ +summary: test_backups_gcp.py +environment: + TEST_MODULE: test_backups_gcp.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results +backends: + - -lxd-vm # Requires CI secrets diff --git a/tests/spread/test_backups_pitr_aws.py/task.yaml b/tests/spread/test_backups_pitr_aws.py/task.yaml new file mode 100644 index 0000000000..4ac59fbf85 --- /dev/null +++ b/tests/spread/test_backups_pitr_aws.py/task.yaml @@ -0,0 +1,9 @@ +summary: test_backups_pitr_aws.py +environment: + TEST_MODULE: test_backups_pitr_aws.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results +backends: + - -lxd-vm # Requires CI secrets diff --git a/tests/spread/test_backups_pitr_gcp.py/task.yaml b/tests/spread/test_backups_pitr_gcp.py/task.yaml new file mode 100644 index 0000000000..a6b31a59a6 --- /dev/null +++ 
b/tests/spread/test_backups_pitr_gcp.py/task.yaml @@ -0,0 +1,9 @@ +summary: test_backups_pitr_gcp.py +environment: + TEST_MODULE: test_backups_pitr_gcp.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results +backends: + - -lxd-vm # Requires CI secrets diff --git a/tests/spread/test_charm.py/task.yaml b/tests/spread/test_charm.py/task.yaml new file mode 100644 index 0000000000..96450bdc32 --- /dev/null +++ b/tests/spread/test_charm.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_charm.py +environment: + TEST_MODULE: test_charm.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_config.py/task.yaml b/tests/spread/test_config.py/task.yaml new file mode 100644 index 0000000000..f330f89b38 --- /dev/null +++ b/tests/spread/test_config.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_config.py +environment: + TEST_MODULE: test_config.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_new_relations_1.py/task.yaml b/tests/spread/test_new_relations_1.py/task.yaml new file mode 100644 index 0000000000..0c64fe771f --- /dev/null +++ b/tests/spread/test_new_relations_1.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_new_relations_1.py +environment: + TEST_MODULE: new_relations/test_new_relations_1.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_new_relations_2.py/task.yaml b/tests/spread/test_new_relations_2.py/task.yaml new file mode 100644 index 0000000000..e8a02d4eea --- /dev/null +++ b/tests/spread/test_new_relations_2.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_new_relations_2.py +environment: + TEST_MODULE: new_relations/test_new_relations_2.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_password_rotation.py/task.yaml b/tests/spread/test_password_rotation.py/task.yaml new file mode 100644 index 0000000000..439559b4e6 --- /dev/null +++ b/tests/spread/test_password_rotation.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_password_rotation.py +environment: + TEST_MODULE: test_password_rotation.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_plugins.py/task.yaml b/tests/spread/test_plugins.py/task.yaml new file mode 100644 index 0000000000..e9dce8e28f --- /dev/null +++ b/tests/spread/test_plugins.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_plugins.py +environment: + TEST_MODULE: test_plugins.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_relations_coherence.py/task.yaml b/tests/spread/test_relations_coherence.py/task.yaml new file mode 100644 index 0000000000..bff0e492b3 --- /dev/null +++ b/tests/spread/test_relations_coherence.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_relations_coherence.py +environment: + TEST_MODULE: 
new_relations/test_relations_coherence.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_replication.py/task.yaml b/tests/spread/test_replication.py/task.yaml new file mode 100644 index 0000000000..237cc3981b --- /dev/null +++ b/tests/spread/test_replication.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_replication.py +environment: + TEST_MODULE: ha_tests/test_replication.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_restart.py/task.yaml b/tests/spread/test_restart.py/task.yaml new file mode 100644 index 0000000000..5ea1f451d7 --- /dev/null +++ b/tests/spread/test_restart.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_restart.py +environment: + TEST_MODULE: ha_tests/test_restart.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_self_healing_1.py/task.yaml b/tests/spread/test_self_healing_1.py/task.yaml new file mode 100644 index 0000000000..4532290fd1 --- /dev/null +++ b/tests/spread/test_self_healing_1.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_self_healing_1.py +environment: + TEST_MODULE: ha_tests/test_self_healing_1.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_self_healing_2.py/task.yaml b/tests/spread/test_self_healing_2.py/task.yaml new file mode 100644 index 0000000000..e06e899c08 --- /dev/null +++ b/tests/spread/test_self_healing_2.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_self_healing_2.py +environment: + TEST_MODULE: ha_tests/test_self_healing_2.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_smoke.py/task.yaml b/tests/spread/test_smoke.py/task.yaml new file mode 100644 index 0000000000..d4ce1fa995 --- /dev/null +++ b/tests/spread/test_smoke.py/task.yaml @@ -0,0 +1,9 @@ +summary: test_smoke.py +environment: + TEST_MODULE: ha_tests/test_smoke.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results +systems: + - -ubuntu-24.04-arm diff --git a/tests/spread/test_storage.py/task.yaml b/tests/spread/test_storage.py/task.yaml new file mode 100644 index 0000000000..3d71c6f199 --- /dev/null +++ b/tests/spread/test_storage.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_storage.py +environment: + TEST_MODULE: test_storage.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_synchronous_policy.py/task.yaml b/tests/spread/test_synchronous_policy.py/task.yaml new file mode 100644 index 0000000000..fada7cb4fb --- /dev/null +++ b/tests/spread/test_synchronous_policy.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_synchronous_policy.py +environment: + TEST_MODULE: ha_tests/test_synchronous_policy.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" 
+artifacts: + - allure-results diff --git a/tests/spread/test_tls.py/task.yaml b/tests/spread/test_tls.py/task.yaml new file mode 100644 index 0000000000..a605744913 --- /dev/null +++ b/tests/spread/test_tls.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_tls.py +environment: + TEST_MODULE: test_tls.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_trust.py/task.yaml b/tests/spread/test_trust.py/task.yaml new file mode 100644 index 0000000000..8a96a65212 --- /dev/null +++ b/tests/spread/test_trust.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_trust.py +environment: + TEST_MODULE: test_trust.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_upgrade.py/task.yaml b/tests/spread/test_upgrade.py/task.yaml new file mode 100644 index 0000000000..b3be366921 --- /dev/null +++ b/tests/spread/test_upgrade.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_upgrade.py +environment: + TEST_MODULE: ha_tests/test_upgrade.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/spread/test_upgrade_from_stable.py/task.yaml b/tests/spread/test_upgrade_from_stable.py/task.yaml new file mode 100644 index 0000000000..8b16a572a1 --- /dev/null +++ b/tests/spread/test_upgrade_from_stable.py/task.yaml @@ -0,0 +1,9 @@ +summary: test_upgrade_from_stable.py +environment: + TEST_MODULE: ha_tests/test_upgrade_from_stable.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results +systems: + - -ubuntu-24.04-arm diff --git a/tests/spread/test_wrong_arch.py/task.yaml b/tests/spread/test_wrong_arch.py/task.yaml new file mode 100644 index 0000000000..1cf6db7996 --- /dev/null +++ b/tests/spread/test_wrong_arch.py/task.yaml @@ -0,0 +1,7 @@ +summary: test_wrong_arch.py +environment: + TEST_MODULE: test_wrong_arch.py +execute: | + tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results" +artifacts: + - allure-results diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index eb0faf7410..d7f5c13e0a 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -8,27 +8,9 @@ # This causes every test defined in this file to run 2 times, each with # charm.JujuVersion.has_secrets set as True or as False -@pytest.fixture(params=[True, False], autouse=True) +@pytest.fixture(autouse=True) def juju_has_secrets(request, monkeypatch): - monkeypatch.setattr("charm.JujuVersion.has_secrets", PropertyMock(return_value=request.param)) - return request.param - - -@pytest.fixture -def only_with_juju_secrets(juju_has_secrets): - """Pretty way to skip Juju 3 tests.""" - if not juju_has_secrets: - pytest.skip("Secrets test only applies on Juju 3.x") - - -@pytest.fixture -def only_without_juju_secrets(juju_has_secrets): - """Pretty way to skip Juju 2-specific tests. 
- - Typically: to save CI time, when the same check were executed in a Juju 3-specific way already - """ - if juju_has_secrets: - pytest.skip("Skipping legacy secrets tests") + monkeypatch.setattr("charm.JujuVersion.has_secrets", PropertyMock(return_value=True)) @pytest.fixture(autouse=True) diff --git a/tests/unit/test_async_replication.py b/tests/unit/test_async_replication.py index c7623a802f..cc87a5817d 100644 --- a/tests/unit/test_async_replication.py +++ b/tests/unit/test_async_replication.py @@ -20,29 +20,27 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) + harness = Harness(PostgresqlOperatorCharm) - # Set up the initial relation and hooks. - harness.set_leader(True) - harness.begin() + # Set up the initial relation and hooks. + harness.set_leader(True) + harness.begin() - yield harness - harness.cleanup() + yield harness + harness.cleanup() @pytest.fixture(autouse=True) def standby(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - harness.set_model_name("standby") + harness = Harness(PostgresqlOperatorCharm) + harness.set_model_name("standby") - # Set up the initial relation and hooks. - harness.set_leader(True) - harness.begin() + # Set up the initial relation and hooks. + harness.set_leader(True) + harness.begin() - yield harness - harness.cleanup() + yield harness + harness.cleanup() @pytest.mark.parametrize("relation_name", RELATION_NAMES) @@ -316,7 +314,7 @@ def test_promote_to_primary(harness, relation_name): ) harness.update_relation_data(rel_id, "standby/0", {"unit-address": "10.2.2.10"}) - harness.run_action("promote-to-primary") + harness.run_action("promote-to-primary", {"scope": "cluster"}) assert ( harness.get_relation_data(rel_id, harness.charm.app.name).get( diff --git a/tests/unit/test_backups.py b/tests/unit/test_backups.py index 9ef8aec9ee..03cbfb7773 100644 --- a/tests/unit/test_backups.py +++ b/tests/unit/test_backups.py @@ -26,19 +26,18 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - # Mock generic sync client to avoid search to ~/.kube/config. - patcher = patch("lightkube.core.client.GenericSyncClient") - patcher.start() + # Mock generic sync client to avoid search to ~/.kube/config. + patcher = patch("lightkube.core.client.GenericSyncClient") + patcher.start() - harness = Harness(PostgresqlOperatorCharm) + harness = Harness(PostgresqlOperatorCharm) - # Set up the initial relation and hooks. - peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") - harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") - harness.begin() - yield harness - harness.cleanup() + # Set up the initial relation and hooks. 
+ peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() def test_stanza_name(harness): @@ -206,7 +205,7 @@ def test_can_use_s3_repository(harness): patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, patch( "charm.Patroni.rock_postgresql_version", - new_callable=PropertyMock(return_value="14.10"), + new_callable=PropertyMock(return_value="16.6"), ) as _rock_postgresql_version, patch("charm.PostgreSQLBackups._execute_command") as _execute_command, patch( @@ -289,6 +288,18 @@ def test_can_use_s3_repository(harness): ] assert harness.charm.backup.can_use_s3_repository() == (True, None) + # Empty db + _execute_command.side_effect = [ + ( + f'[{{"db": [], "name": "another-model.{harness.charm.cluster_name}"}}]', + None, + ) + ] + assert harness.charm.backup.can_use_s3_repository() == ( + False, + ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE, + ) + def test_construct_endpoint(harness): # Test with an AWS endpoint without region. diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index c26f9eae36..19ea176baf 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -26,7 +26,7 @@ from charm import EXTENSION_OBJECT_MESSAGE, PostgresqlOperatorCharm from constants import PEER, SECRET_INTERNAL_LABEL -from patroni import NotReadyError +from patroni import NotReadyError, SwitchoverFailedError, SwitchoverNotSyncError from tests.unit.helpers import _FakeApiError POSTGRESQL_CONTAINER = "postgresql" @@ -41,14 +41,25 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - harness.handle_exec("postgresql", ["locale", "-a"], result="C") + harness = Harness(PostgresqlOperatorCharm) + harness.handle_exec("postgresql", ["locale", "-a"], result="C") + + harness.add_relation(PEER, "postgresql-k8s") + harness.begin() + harness.add_relation("restart", harness.charm.app.name) + yield harness + harness.cleanup() + - harness.add_relation(PEER, "postgresql-k8s") +def test_set_ports(): + with ( + patch("charm.JujuVersion") as _juju_version, + patch("charm.PostgresqlOperatorCharm.unit") as _unit, + ): + harness = Harness(PostgresqlOperatorCharm) harness.begin() - harness.add_relation("restart", harness.charm.app.name) - yield harness + _unit.set_ports.assert_called_once_with(5432, 8008) + harness.cleanup() @@ -198,7 +209,7 @@ def test_on_postgresql_pebble_ready(harness): patch("charm.PostgresqlOperatorCharm._on_leader_elected"), patch("charm.PostgresqlOperatorCharm._create_pgdata") as _create_pgdata, ): - _rock_postgresql_version.return_value = "14.7" + _rock_postgresql_version.return_value = "16.6" # Mock the primary endpoint ready property values. 
_primary_endpoint_ready.side_effect = [False, True, True] @@ -255,7 +266,7 @@ def test_on_postgresql_pebble_ready_no_connection(harness): ): mock_event = MagicMock() mock_event.workload = harness.model.unit.get_container(POSTGRESQL_CONTAINER) - _rock_postgresql_version.return_value = "14.7" + _rock_postgresql_version.return_value = "16.6" harness.charm._on_postgresql_pebble_ready(mock_event) @@ -279,6 +290,9 @@ def test_on_config_changed(harness): "charm.PostgreSQLUpgrade.idle", return_value=False, new_callable=PropertyMock ) as _idle, patch("charm.PostgresqlOperatorCharm.update_config") as _update_config, + patch( + "charm.PostgresqlOperatorCharm.updated_synchronous_node_count", return_value=True + ) as _updated_synchronous_node_count, patch("charm.Patroni.member_started", return_value=True, new_callable=PropertyMock), patch("charm.Patroni.get_primary"), patch( @@ -321,6 +335,14 @@ def test_on_config_changed(harness): harness.charm._on_config_changed(mock_event) assert isinstance(harness.charm.unit.status, ActiveStatus) assert not _enable_disable_extensions.called + _updated_synchronous_node_count.assert_called_once_with() + + # Defers on synchronous node count update failure + _updated_synchronous_node_count.return_value = False + harness.charm._on_config_changed(mock_event) + mock_event.defer.assert_called_once_with() + mock_event.defer.reset_mock() + _updated_synchronous_node_count.return_value = True + # Leader enables extensions with harness.hooks_disabled(): @@ -331,17 +353,10 @@ def test_on_get_password(harness): - # Create a mock event and set passwords in peer relation data. + harness.set_leader() mock_event = MagicMock(params={}) - rel_id = harness.model.get_relation(PEER).id - harness.update_relation_data( - rel_id, - harness.charm.app.name, - { - "operator-password": "test-password", - "replication-password": "replication-test-password", - }, - ) + harness.charm.set_secret("app", "operator-password", "test-password") + harness.charm.set_secret("app", "replication-password", "replication-test-password") # Test providing an invalid username. mock_event.params["username"] = "user" @@ -457,6 +472,12 @@ def test_on_update_status(harness): patch("ops.model.Container.pebble") as _pebble, patch("ops.model.Container.restart") as _restart, patch("upgrade.PostgreSQLUpgrade.idle", return_value="idle"), + patch( + "charm.PostgresqlOperatorCharm.is_standby_leader", + new_callable=PropertyMock, + return_value=False, + ), + patch("charm.Patroni.get_running_cluster_members", return_value=["test"]), ): # Early exit on can connect.
harness.set_can_connect(POSTGRESQL_CONTAINER, False) @@ -663,11 +684,14 @@ def test_on_peer_relation_departed(harness): patch( "charm.PostgresqlOperatorCharm._get_endpoints_to_remove" ) as _get_endpoints_to_remove, - patch("charm.PostgresqlOperatorCharm._peers", new_callable=PropertyMock) as _peers, + patch( + "charm.PostgresqlOperatorCharm.app_peer_data", new_callable=PropertyMock + ) as _app_peer_data, patch( "charm.PostgresqlOperatorCharm._get_endpoints_to_remove", return_value=sentinel.units ) as _get_endpoints_to_remove, patch("charm.PostgresqlOperatorCharm._remove_from_endpoints") as _remove_from_endpoints, + patch("charm.PostgresqlOperatorCharm.updated_synchronous_node_count"), ): # Early exit if not leader event = Mock() @@ -680,7 +704,7 @@ def test_on_peer_relation_departed(harness): harness.charm._on_peer_relation_departed(event) event.defer.assert_called_once_with() - _peers.return_value.data = {harness.charm.app: {"cluster_initialised": True}} + _app_peer_data.return_value = {"cluster_initialised": True} harness.charm._on_peer_relation_departed(event) _get_endpoints_to_remove.assert_called_once_with() _remove_from_endpoints.assert_called_once_with(sentinel.units) @@ -1054,12 +1078,8 @@ def test_client_relations(harness): # Test when the charm has some relations. harness.add_relation("database", "application") - harness.add_relation("db", "legacy-application") - harness.add_relation("db-admin", "legacy-admin-application") database_relation = harness.model.get_relation("database") - db_relation = harness.model.get_relation("db") - db_admin_relation = harness.model.get_relation("db-admin") - assert harness.charm.client_relations == [database_relation, db_relation, db_admin_relation] + assert harness.charm.client_relations == [database_relation] def test_validate_config_options(harness): @@ -1119,32 +1139,6 @@ def test_scope_obj(harness): assert harness.charm._scope_obj("test") is None -def test_get_secret_from_databag(harness): - """Asserts that get_secret method can read secrets from databag. - - This must be backwards-compatible so it runs on both juju2 and juju3. - """ - with patch("charm.PostgresqlOperatorCharm._on_leader_elected"): - rel_id = harness.model.get_relation(PEER).id - # App level changes require leader privileges - harness.set_leader() - # Test application scope. - assert harness.charm.get_secret("app", "operator_password") is None - harness.update_relation_data( - rel_id, harness.charm.app.name, {"operator_password": "test-password"} - ) - assert harness.charm.get_secret("app", "operator_password") == "test-password" - - # Unit level changes don't require leader privileges - harness.set_leader(False) - # Test unit scope. - assert harness.charm.get_secret("unit", "operator_password") is None - harness.update_relation_data( - rel_id, harness.charm.unit.name, {"operator_password": "test-password"} - ) - assert harness.charm.get_secret("unit", "operator_password") == "test-password" - - def test_on_get_password_secrets(harness): with patch("charm.PostgresqlOperatorCharm._on_leader_elected"): # Create a mock event and set passwords in peer relation data. @@ -1182,40 +1176,6 @@ def test_get_secret_secrets(harness, scope): assert harness.charm.get_secret(scope, "operator-password") == "test-password" -def test_set_secret_in_databag(harness, only_without_juju_secrets): - """Asserts that set_secret method writes to relation databag. - - This is juju2 specific. In juju3, set_secret writes to juju secrets. 
- """ - with patch("charm.PostgresqlOperatorCharm._on_leader_elected"): - rel_id = harness.model.get_relation(PEER).id - harness.set_leader() - - # Test application scope. - assert "password" not in harness.get_relation_data(rel_id, harness.charm.app.name) - harness.charm.set_secret("app", "password", "test-password") - assert ( - harness.get_relation_data(rel_id, harness.charm.app.name)["password"] - == "test-password" - ) - harness.charm.set_secret("app", "password", None) - assert "password" not in harness.get_relation_data(rel_id, harness.charm.app.name) - - # Test unit scope. - assert "password" not in harness.get_relation_data(rel_id, harness.charm.unit.name) - harness.charm.set_secret("unit", "password", "test-password") - assert ( - harness.get_relation_data(rel_id, harness.charm.unit.name)["password"] - == "test-password" - ) - harness.charm.set_secret("unit", "password", None) - assert "password" not in harness.get_relation_data(rel_id, harness.charm.unit.name) - - with pytest.raises(RuntimeError): - harness.charm.set_secret("test", "password", "test") - assert False - - @pytest.mark.parametrize("scope,is_leader", [("app", True), ("unit", True), ("unit", False)]) def test_set_reset_new_secret(harness, scope, is_leader): """NOTE: currently ops.testing seems to allow for non-leader to set secrets too!""" @@ -1248,7 +1208,7 @@ def test_invalid_secret(harness, scope, is_leader): assert harness.charm.get_secret(scope, "somekey") is None -def test_delete_password(harness, juju_has_secrets, caplog): +def test_delete_password(harness, caplog): """NOTE: currently ops.testing seems to allow for non-leader to remove secrets too!""" with patch("charm.PostgresqlOperatorCharm._on_leader_elected"): harness.set_leader(True) @@ -1263,14 +1223,7 @@ def test_delete_password(harness, juju_has_secrets, caplog): harness.set_leader(True) with caplog.at_level(logging.DEBUG): - if juju_has_secrets: - error_message = ( - "Non-existing secret operator-password was attempted to be removed." - ) - else: - error_message = ( - "Non-existing field 'operator-password' was attempted to be removed" - ) + error_message = "Non-existing secret operator-password was attempted to be removed." harness.charm.remove_secret("app", "operator-password") assert error_message in caplog.text @@ -1292,32 +1245,7 @@ def test_delete_password(harness, juju_has_secrets, caplog): @pytest.mark.parametrize("scope,is_leader", [("app", True), ("unit", True), ("unit", False)]) -def test_migration_from_databag(harness, only_with_juju_secrets, scope, is_leader): - """Check if we're moving on to use secrets when live upgrade from databag to Secrets usage. - - Since it checks for a migration from databag to juju secrets, it's specific to juju3. 
- """ - with patch("charm.PostgresqlOperatorCharm._on_leader_elected"): - rel_id = harness.model.get_relation(PEER).id - # App has to be leader, unit can be either - harness.set_leader(is_leader) - - # Getting current password - entity = getattr(harness.charm, scope) - harness.update_relation_data(rel_id, entity.name, {"operator_password": "bla"}) - assert harness.charm.get_secret(scope, "operator_password") == "bla" - - # Reset new secret - harness.charm.set_secret(scope, "operator-password", "blablabla") - assert harness.charm.model.get_secret(label=f"{PEER}.postgresql-k8s.{scope}") - assert harness.charm.get_secret(scope, "operator-password") == "blablabla" - assert "operator-password" not in harness.get_relation_data( - rel_id, getattr(harness.charm, scope).name - ) - - -@pytest.mark.parametrize("scope,is_leader", [("app", True), ("unit", True), ("unit", False)]) -def test_migration_from_single_secret(harness, only_with_juju_secrets, scope, is_leader): +def test_migration_from_single_secret(harness, scope, is_leader): """Check if we're moving on to use secrets when live upgrade from databag to Secrets usage. Since it checks for a migration from databag to juju secrets, it's specific to juju3. @@ -1670,6 +1598,7 @@ def test_update_config(harness): harness.update_relation_data( rel_id, harness.charm.unit.name, {"tls": ""} ) # Mock some data in the relation to test that it doesn't change. + _is_tls_enabled.return_value = False harness.charm.update_config() _handle_postgresql_restart_need.assert_not_called() assert "tls" not in harness.get_relation_data(rel_id, harness.charm.unit.name) @@ -1715,6 +1644,7 @@ def test_handle_postgresql_restart_need(harness): def test_set_active_status(harness): with ( patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, + patch("charm.Patroni.get_running_cluster_members", return_value=["test"]), patch( "charm.PostgresqlOperatorCharm.is_standby_leader", new_callable=PropertyMock ) as _is_standby_leader, @@ -1747,7 +1677,9 @@ def test_set_active_status(harness): assert isinstance(harness.charm.unit.status, MaintenanceStatus) else: _is_standby_leader.side_effect = None - _is_standby_leader.return_value = values[1] + _is_standby_leader.return_value = ( + values[0] != harness.charm.unit.name and values[1] + ) harness.charm._set_active_status() assert isinstance( harness.charm.unit.status, @@ -1822,3 +1754,42 @@ def test_get_plugins(harness): "insert_username", "moddatetime", ] + + +def test_on_promote_to_primary(harness): + with ( + patch("charm.PostgreSQLAsyncReplication.promote_to_primary") as _promote_to_primary, + patch("charm.Patroni.switchover") as _switchover, + ): + event = Mock() + event.params = {"scope": "cluster"} + + # Cluster + harness.charm._on_promote_to_primary(event) + _promote_to_primary.assert_called_once_with(event) + + # Unit, no force, regular promotion + event.params = {"scope": "unit"} + + harness.charm._on_promote_to_primary(event) + + _switchover.assert_called_once_with("postgresql-k8s/0", wait=False) + + # Unit, no force, switchover failed + event.params = {"scope": "unit"} + _switchover.side_effect = SwitchoverFailedError + + harness.charm._on_promote_to_primary(event) + + event.fail.assert_called_once_with( + "Switchover failed or timed out, check the logs for details" + ) + event.fail.reset_mock() + + # Unit, no force, not sync + event.params = {"scope": "unit"} + _switchover.side_effect = SwitchoverNotSyncError + + harness.charm._on_promote_to_primary(event) + + 
event.fail.assert_called_once_with("Unit is not sync standby") diff --git a/tests/unit/test_db.py b/tests/unit/test_db.py deleted file mode 100644 index ddcbec8390..0000000000 --- a/tests/unit/test_db.py +++ /dev/null @@ -1,502 +0,0 @@ -# Copyright 2023 Canonical Ltd. -# See LICENSE file for licensing details. - -from unittest.mock import Mock, PropertyMock, patch - -import pytest -from charms.postgresql_k8s.v0.postgresql import ( - PostgreSQLCreateDatabaseError, - PostgreSQLCreateUserError, - PostgreSQLGetPostgreSQLVersionError, -) -from ops import Unit -from ops.framework import EventBase -from ops.model import ActiveStatus, BlockedStatus -from ops.testing import Harness - -from charm import PostgresqlOperatorCharm -from constants import DATABASE_PORT, PEER - -DATABASE = "test_database" -RELATION_NAME = "db" -POSTGRESQL_VERSION = "14" - - -@pytest.fixture(autouse=True) -def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - - # Set up the initial relation and hooks. - harness.set_leader(True) - harness.begin() - - # Define some relations. - rel_id = harness.add_relation(RELATION_NAME, "application") - harness.add_relation_unit(rel_id, "application/0") - peer_rel_id = harness.add_relation(PEER, harness.charm.app.name) - harness.add_relation_unit(peer_rel_id, f"{harness.charm.app.name}/1") - harness.add_relation_unit(peer_rel_id, harness.charm.unit.name) - harness.update_relation_data( - peer_rel_id, - harness.charm.app.name, - {"cluster_initialised": "True"}, - ) - yield harness - harness.cleanup() - - -def clear_relation_data(_harness): - data = { - "allowed-subnets": "", - "allowed-units": "", - "host": "", - "port": "", - "master": "", - "standbys": "", - "version": "", - "user": "", - "password": "", - "database": "", - "extensions": "", - } - rel_id = _harness.model.get_relation(RELATION_NAME).id - _harness.update_relation_data(rel_id, _harness.charm.app.name, data) - _harness.update_relation_data(rel_id, _harness.charm.unit.name, data) - - -def request_database(_harness): - # Reset the charm status. - _harness.model.unit.status = ActiveStatus() - rel_id = _harness.model.get_relation(RELATION_NAME).id - - with _harness.hooks_disabled(): - # Reset the application databag. - _harness.update_relation_data( - rel_id, - "application/0", - {"database": ""}, - ) - - # Reset the database databag. - clear_relation_data(_harness) - - # Simulate the request of a new database. - _harness.update_relation_data( - rel_id, - "application/0", - {"database": DATABASE}, - ) - - -def test_on_relation_changed(harness): - with ( - patch("charm.DbProvides.set_up_relation") as _set_up_relation, - patch.object(EventBase, "defer") as _defer, - patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started, - ): - peer_rel_id = harness.model.get_relation(PEER).id - # Set some side effects to test multiple situations. - _member_started.side_effect = [False, False, True, True] - - # Request a database before the cluster is initialised. - request_database(harness) - _defer.assert_called_once() - _set_up_relation.assert_not_called() - - # Request a database before the database is ready. - with harness.hooks_disabled(): - harness.update_relation_data( - peer_rel_id, - harness.charm.app.name, - {"cluster_initialised": "True"}, - ) - request_database(harness) - assert _defer.call_count == 2 - _set_up_relation.assert_not_called() - - # Request a database to a non leader unit. 
- _defer.reset_mock() - with harness.hooks_disabled(): - harness.set_leader(False) - request_database(harness) - _defer.assert_not_called() - _set_up_relation.assert_not_called() - - # Request it again in a leader unit. - with harness.hooks_disabled(): - harness.set_leader() - request_database(harness) - _defer.assert_not_called() - _set_up_relation.assert_called_once() - - -def test_get_extensions(harness): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - # Test when there are no extensions in the relation databags. - rel_id = harness.model.get_relation(RELATION_NAME).id - relation = harness.model.get_relation(RELATION_NAME, rel_id) - assert harness.charm.legacy_db_relation._get_extensions(relation) == ([], set()) - - # Test when there are extensions in the application relation databag. - extensions = ["", "citext:public", "debversion"] - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application", - {"extensions": ",".join(extensions)}, - ) - assert harness.charm.legacy_db_relation._get_extensions(relation) == ( - [extensions[1], extensions[2]], - {extensions[1].split(":")[0], extensions[2]}, - ) - - # Test when there are extensions in the unit relation databag. - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application", - {"extensions": ""}, - ) - harness.update_relation_data( - rel_id, - "application/0", - {"extensions": ",".join(extensions)}, - ) - assert harness.charm.legacy_db_relation._get_extensions(relation) == ( - [extensions[1], extensions[2]], - {extensions[1].split(":")[0], extensions[2]}, - ) - - # Test when one of the plugins/extensions is enabled. - config = """options: - plugin_citext_enable: - default: true - type: boolean - plugin_debversion_enable: - default: false - type: boolean""" - harness = Harness(PostgresqlOperatorCharm, config=config) - harness.cleanup() - harness.begin() - assert harness.charm.legacy_db_relation._get_extensions(relation) == ( - [extensions[1], extensions[2]], - {extensions[2]}, - ) - - -def test_set_up_relation(harness): - with ( - patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock, - patch("relations.db.DbProvides._update_unit_status") as _update_unit_status, - patch("relations.db.new_password", return_value="test-password") as _new_password, - patch("relations.db.DbProvides._get_extensions") as _get_extensions, - patch("relations.db.logger") as _logger, - ): - rel_id = harness.model.get_relation(RELATION_NAME).id - # Define some mocks' side effects. - extensions = ["citext:public", "debversion"] - _get_extensions.side_effect = [ - (extensions, {"debversion"}), - (extensions, set()), - (extensions, set()), - (extensions, set()), - (extensions, set()), - (extensions, set()), - (extensions, set()), - ] - postgresql_mock.create_user = PropertyMock( - side_effect=[None, None, PostgreSQLCreateUserError, None, None] - ) - postgresql_mock.create_database = PropertyMock( - side_effect=[None, None, PostgreSQLCreateDatabaseError, None] - ) - postgresql_mock.get_postgresql_version = PropertyMock(return_value=POSTGRESQL_VERSION) - - # Assert no operation is done when at least one of the requested extensions - # is disabled. 
- relation = harness.model.get_relation(RELATION_NAME, rel_id) - assert not harness.charm.legacy_db_relation.set_up_relation(relation) - postgresql_mock.create_user.assert_not_called() - postgresql_mock.create_database.assert_not_called() - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - - # Assert that the correct calls were made in a successful setup. - harness.charm.unit.status = ActiveStatus() - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application", - {"database": DATABASE}, - ) - assert harness.charm.legacy_db_relation.set_up_relation(relation) - user = f"relation_id_{rel_id}" - postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) - postgresql_mock.create_database.assert_called_once_with( - DATABASE, user, plugins=["pgaudit"], client_relations=[relation] - ) - assert postgresql_mock.get_postgresql_version.call_count == 1 - _update_unit_status.assert_called_once() - expected_data = { - "allowed-units": "application/0", - "database": DATABASE, - "extensions": ",".join(extensions), - "host": f"postgresql-k8s-0.postgresql-k8s-endpoints.{harness.model.name}.svc.cluster.local", - "master": f"dbname={DATABASE} fallback_application_name=application " - f"host=postgresql-k8s-primary.{harness.model.name}.svc.cluster.local " - f"password=test-password port=5432 user=relation_id_{rel_id}", - "password": "test-password", - "port": DATABASE_PORT, - "standbys": f"dbname={DATABASE} fallback_application_name=application " - f"host=postgresql-k8s-replicas.{harness.model.name}.svc.cluster.local " - f"password=test-password port=5432 user=relation_id_{rel_id}", - "user": f"relation_id_{rel_id}", - "version": POSTGRESQL_VERSION, - } - assert harness.get_relation_data(rel_id, harness.charm.app.name) == expected_data - assert harness.get_relation_data(rel_id, harness.charm.unit.name) == expected_data - assert not isinstance(harness.model.unit.status, BlockedStatus) - - # Assert that the correct calls were made when the database name is - # provided only in the unit databag. - postgresql_mock.create_user.reset_mock() - postgresql_mock.create_database.reset_mock() - postgresql_mock.get_postgresql_version.reset_mock() - _update_unit_status.reset_mock() - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application", - {"database": ""}, - ) - harness.update_relation_data( - rel_id, - "application/0", - {"database": DATABASE}, - ) - clear_relation_data(harness) - assert harness.charm.legacy_db_relation.set_up_relation(relation) - postgresql_mock.create_user.assert_called_once_with(user, "test-password", False) - postgresql_mock.create_database.assert_called_once_with( - DATABASE, user, plugins=["pgaudit"], client_relations=[relation] - ) - assert postgresql_mock.get_postgresql_version.call_count == 1 - _update_unit_status.assert_called_once() - assert harness.get_relation_data(rel_id, harness.charm.app.name) == expected_data - assert harness.get_relation_data(rel_id, harness.charm.unit.name) == expected_data - assert not isinstance(harness.model.unit.status, BlockedStatus) - - # Assert that the correct calls were made when the database name is not provided. 
- postgresql_mock.create_user.reset_mock() - postgresql_mock.create_database.reset_mock() - postgresql_mock.get_postgresql_version.reset_mock() - _update_unit_status.reset_mock() - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application/0", - {"database": ""}, - ) - clear_relation_data(harness) - assert not harness.charm.legacy_db_relation.set_up_relation(relation) - postgresql_mock.create_user.assert_not_called() - postgresql_mock.create_database.assert_not_called() - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - # No data is set in the databags by the database. - assert harness.get_relation_data(rel_id, harness.charm.app.name) == {} - assert harness.get_relation_data(rel_id, harness.charm.unit.name) == {} - assert not isinstance(harness.model.unit.status, BlockedStatus) - - # BlockedStatus due to a PostgreSQLCreateUserError. - with harness.hooks_disabled(): - harness.update_relation_data( - rel_id, - "application", - {"database": DATABASE}, - ) - assert not harness.charm.legacy_db_relation.set_up_relation(relation) - postgresql_mock.create_database.assert_not_called() - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - assert isinstance(harness.model.unit.status, BlockedStatus) - # No data is set in the databags by the database. - assert harness.get_relation_data(rel_id, harness.charm.app.name) == {} - assert harness.get_relation_data(rel_id, harness.charm.unit.name) == {} - - # BlockedStatus due to a PostgreSQLCreateDatabaseError. - harness.charm.unit.status = ActiveStatus() - assert not harness.charm.legacy_db_relation.set_up_relation(relation) - postgresql_mock.get_postgresql_version.assert_not_called() - _update_unit_status.assert_not_called() - assert isinstance(harness.model.unit.status, BlockedStatus) - # No data is set in the databags by the database. - assert harness.get_relation_data(rel_id, harness.charm.app.name) == {} - assert harness.get_relation_data(rel_id, harness.charm.unit.name) == {} - - # version is not updated due to a PostgreSQLGetPostgreSQLVersionError. - postgresql_mock.get_postgresql_version.side_effect = PostgreSQLGetPostgreSQLVersionError - harness.charm.unit.status = ActiveStatus() - assert harness.charm.legacy_db_relation.set_up_relation(relation) - _logger.exception.assert_called_once_with( - "Failed to retrieve the PostgreSQL version to initialise/update db relation" - ) - - -def test_update_unit_status(harness): - with ( - patch( - "relations.db.DbProvides._check_for_blocking_relations" - ) as _check_for_blocking_relations, - patch( - "charm.PostgresqlOperatorCharm._has_blocked_status", new_callable=PropertyMock - ) as _has_blocked_status, - ): - rel_id = harness.model.get_relation(RELATION_NAME).id - # Test when the charm is not blocked. - relation = harness.model.get_relation(RELATION_NAME, rel_id) - _has_blocked_status.return_value = False - harness.charm.legacy_db_relation._update_unit_status(relation) - _check_for_blocking_relations.assert_not_called() - assert not isinstance(harness.charm.unit.status, ActiveStatus) - - # Test when the charm is blocked but not due to extensions request. 
-        _has_blocked_status.return_value = True
-        harness.charm.unit.status = BlockedStatus("fake message")
-        harness.charm.legacy_db_relation._update_unit_status(relation)
-        _check_for_blocking_relations.assert_not_called()
-        assert not isinstance(harness.charm.unit.status, ActiveStatus)
-
-        # Test when there are relations causing the blocked status.
-        harness.charm.unit.status = BlockedStatus("extensions requested through relation")
-        _check_for_blocking_relations.return_value = True
-        harness.charm.legacy_db_relation._update_unit_status(relation)
-        _check_for_blocking_relations.assert_called_once_with(relation.id)
-        assert not isinstance(harness.charm.unit.status, ActiveStatus)
-
-        # Test when there are no relations causing the blocked status anymore.
-        _check_for_blocking_relations.reset_mock()
-        _check_for_blocking_relations.return_value = False
-        harness.charm.legacy_db_relation._update_unit_status(relation)
-        _check_for_blocking_relations.assert_called_once_with(relation.id)
-        assert isinstance(harness.charm.unit.status, ActiveStatus)
-
-
-def test_on_relation_departed(harness):
-    with patch("charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)):
-        # Test when this unit is departing the relation (due to a scale down event).
-        peer_rel_id = harness.model.get_relation(PEER).id
-        assert "departing" not in harness.get_relation_data(peer_rel_id, harness.charm.unit)
-        event = Mock()
-        event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}}
-        event.departing_unit = harness.charm.unit
-        harness.charm.legacy_db_relation._on_relation_departed(event)
-        assert "departing" in harness.get_relation_data(peer_rel_id, harness.charm.unit)
-
-        # Test when this unit is departing the relation (due to the relation being broken between the apps).
-        with harness.hooks_disabled():
-            harness.update_relation_data(peer_rel_id, harness.charm.unit.name, {"departing": ""})
-        event.relation.data = {harness.charm.app: {}, harness.charm.unit: {}}
-        event.departing_unit = Unit(f"{harness.charm.app}/1", None, harness.charm.app._backend, {})
-        harness.charm.legacy_db_relation._on_relation_departed(event)
-        relation_data = harness.get_relation_data(peer_rel_id, harness.charm.unit)
-        assert "departing" not in relation_data
-
-
-def test_on_relation_broken(harness):
-    with patch(
-        "charm.Patroni.member_started", new_callable=PropertyMock(return_value=True)
-    ) as _member_started:
-        rel_id = harness.model.get_relation(RELATION_NAME).id
-        peer_rel_id = harness.model.get_relation(PEER).id
-        with harness.hooks_disabled():
-            harness.set_leader()
-        with patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock:
-            # Test when this unit is departing the relation (due to the relation being broken between the apps).
-            event = Mock()
-            event.relation.id = rel_id
-            harness.charm.legacy_db_relation._on_relation_broken(event)
-            user = f"relation_id_{rel_id}"
-            postgresql_mock.delete_user.assert_called_once_with(user)
-
-            # Test when this unit is departing the relation (due to a scale down event).
-            postgresql_mock.reset_mock()
-            with harness.hooks_disabled():
-                harness.update_relation_data(
-                    peer_rel_id, harness.charm.unit.name, {"departing": "True"}
-                )
-            harness.charm.legacy_db_relation._on_relation_broken(event)
-            postgresql_mock.delete_user.assert_not_called()
-
-
-def test_on_relation_broken_extensions_unblock(harness):
-    with (
-        patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock,
-        patch(
-            "charm.PostgresqlOperatorCharm.primary_endpoint",
-            new_callable=PropertyMock,
-        ) as _primary_endpoint,
-        patch(
-            "charm.PostgresqlOperatorCharm._has_blocked_status", new_callable=PropertyMock
-        ) as _has_blocked_status,
-        patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started,
-        patch("charm.DbProvides._on_relation_departed") as _on_relation_departed,
-    ):
-        rel_id = harness.model.get_relation(RELATION_NAME).id
-        # Set some side effects to test multiple situations.
-        _has_blocked_status.return_value = True
-        _member_started.return_value = True
-        _primary_endpoint.return_value = {"1.1.1.1"}
-        postgresql_mock.delete_user = PropertyMock(return_value=None)
-        harness.model.unit.status = BlockedStatus("extensions requested through relation")
-        with harness.hooks_disabled():
-            harness.update_relation_data(
-                rel_id,
-                "application",
-                {"database": DATABASE, "extensions": "test"},
-            )
-
-        # Break the relation that blocked the charm.
-        harness.remove_relation(rel_id)
-        assert isinstance(harness.model.unit.status, ActiveStatus)
-
-
-def test_on_relation_broken_extensions_keep_block(harness):
-    with (
-        patch.object(PostgresqlOperatorCharm, "postgresql", Mock()) as postgresql_mock,
-        patch(
-            "charm.PostgresqlOperatorCharm.primary_endpoint",
-            new_callable=PropertyMock,
-        ) as _primary_endpoint,
-        patch("charm.PostgresqlOperatorCharm.is_blocked", new_callable=PropertyMock) as is_blocked,
-        patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started,
-        patch("charm.DbProvides._on_relation_departed") as _on_relation_departed,
-    ):
-        # Set some side effects to test multiple situations.
-        is_blocked.return_value = True
-        _member_started.return_value = True
-        _primary_endpoint.return_value = {"1.1.1.1"}
-        postgresql_mock.delete_user = PropertyMock(return_value=None)
-        harness.model.unit.status = BlockedStatus(
-            "extensions requested through relation, enable them through config options"
-        )
-        with harness.hooks_disabled():
-            first_rel_id = harness.add_relation(RELATION_NAME, "application1")
-            harness.update_relation_data(
-                first_rel_id,
-                "application1",
-                {"database": DATABASE, "extensions": "test"},
-            )
-            second_rel_id = harness.add_relation(RELATION_NAME, "application2")
-            harness.update_relation_data(
-                second_rel_id,
-                "application2",
-                {"database": DATABASE, "extensions": "test"},
-            )
-
-        event = Mock()
-        event.relation.id = first_rel_id
-        # Break one of the relations that block the charm.
-        harness.charm.legacy_db_relation._on_relation_broken(event)
-        assert isinstance(harness.model.unit.status, BlockedStatus)
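The tests deleted above pinned the exact payload the legacy db interface wrote to both databags, including the libpq-style key=value connection strings under the master and standbys keys. As a hedged illustration only (no such helper exists in this repository, and values containing spaces would need real libpq parsing), a legacy client could unpack one of those strings like this:

    def parse_libpq_dsn(dsn: str) -> dict[str, str]:
        # Split 'dbname=x host=y ...' into a dict of connection parameters.
        return dict(pair.split("=", 1) for pair in dsn.split())

    params = parse_libpq_dsn(
        "dbname=test_database fallback_application_name=application "
        "host=postgresql-k8s-primary.model.svc.cluster.local "
        "password=test-password port=5432 user=relation_id_1"
    )
    assert params["port"] == "5432"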
diff --git a/tests/unit/test_patroni.py b/tests/unit/test_patroni.py
index e000f2bef5..d1944abb4d 100644
--- a/tests/unit/test_patroni.py
+++ b/tests/unit/test_patroni.py
@@ -13,40 +13,38 @@
 from charm import PostgresqlOperatorCharm
 from constants import REWIND_USER
-from patroni import PATRONI_TIMEOUT, Patroni, SwitchoverFailedError
+from patroni import PATRONI_TIMEOUT, Patroni, SwitchoverFailedError, SwitchoverNotSyncError
 from tests.helpers import STORAGE_PATH
 
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
-        harness.begin()
-        yield harness
-        harness.cleanup()
+    harness = Harness(PostgresqlOperatorCharm)
+    harness.begin()
+    yield harness
+    harness.cleanup()
 
 
 @pytest.fixture(autouse=True)
 def patroni(harness):
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        # Setup Patroni wrapper.
-        patroni = Patroni(
-            harness.charm,
-            "postgresql-k8s-0",
-            ["postgresql-k8s-0", "postgresql-k8s-1", "postgresql-k8s-2"],
-            "postgresql-k8s-primary.dev.svc.cluster.local",
-            "test-model",
-            STORAGE_PATH,
-            "superuser-password",
-            "replication-password",
-            "rewind-password",
-            False,
-            "patroni-password",
-        )
-        root = harness.get_filesystem_root("postgresql")
-        (root / "var" / "log" / "postgresql").mkdir(parents=True, exist_ok=True)
+    # Set up the Patroni wrapper.
+    patroni = Patroni(
+        harness.charm,
+        "postgresql-k8s-0",
+        ["postgresql-k8s-0", "postgresql-k8s-1", "postgresql-k8s-2"],
+        "postgresql-k8s-primary.dev.svc.cluster.local",
+        "test-model",
+        STORAGE_PATH,
+        "superuser-password",
+        "replication-password",
+        "rewind-password",
+        False,
+        "patroni-password",
+    )
+    root = harness.get_filesystem_root("postgresql")
+    (root / "var" / "log" / "postgresql").mkdir(parents=True, exist_ok=True)
 
-        yield patroni
+    yield patroni
 
 
 # This method will be used by the mock to replace requests.get
@@ -90,7 +88,7 @@ def test_get_primary(harness, patroni):
         _get.assert_called_once_with(
             "http://postgresql-k8s-0:8008/cluster",
             verify=True,
-            timeout=5,
+            timeout=10,
             auth=patroni._patroni_auth,
         )
@@ -101,7 +99,7 @@ def test_get_primary(harness, patroni):
         _get.assert_called_once_with(
             "http://postgresql-k8s-0:8008/cluster",
             verify=True,
-            timeout=5,
+            timeout=10,
             auth=patroni._patroni_auth,
         )
@@ -202,7 +200,7 @@ def test_render_patroni_yml_file(harness, patroni):
         ) as _rock_postgresql_version,
         patch("charm.Patroni._render_file") as _render_file,
     ):
-        _rock_postgresql_version.return_value = "14.7"
+        _rock_postgresql_version.return_value = "16.6"
 
         # Get the expected content from a file.
         with open("templates/patroni.yml.j2") as file:
@@ -217,7 +215,8 @@ def test_render_patroni_yml_file(harness, patroni):
             rewind_user=REWIND_USER,
             rewind_password=patroni._rewind_password,
             minority_count=patroni._members_count // 2,
-            version="14",
+            synchronous_node_count=0,
+            version="16",
             patroni_password=patroni._patroni_password,
         )
 
@@ -252,7 +251,8 @@ def test_render_patroni_yml_file(harness, patroni):
             rewind_user=REWIND_USER,
             rewind_password=patroni._rewind_password,
             minority_count=patroni._members_count // 2,
-            version="14",
+            synchronous_node_count=0,
+            version="16",
             patroni_password=patroni._patroni_password,
         )
         assert expected_content_with_tls != expected_content
@@ -333,11 +333,9 @@ def test_switchover(harness, patroni):
         # Test failed switchovers.
         _post.reset_mock()
         _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-1"]
-        try:
+        with pytest.raises(SwitchoverFailedError):
             patroni.switchover("postgresql-k8s/2")
             assert False
-        except SwitchoverFailedError:
-            pass
         _post.assert_called_once_with(
             "http://postgresql-k8s-0:8008/switchover",
             json={"leader": "postgresql-k8s-0", "candidate": "postgresql-k8s-2"},
@@ -349,11 +347,9 @@ def test_switchover(harness, patroni):
         _post.reset_mock()
         _get_primary.side_effect = ["postgresql-k8s-0", "postgresql-k8s-2"]
         response.status_code = 400
-        try:
+        with pytest.raises(SwitchoverFailedError):
             patroni.switchover("postgresql-k8s/2")
             assert False
-        except SwitchoverFailedError:
-            pass
         _post.assert_called_once_with(
             "http://postgresql-k8s-0:8008/switchover",
             json={"leader": "postgresql-k8s-0", "candidate": "postgresql-k8s-2"},
@@ -362,6 +358,14 @@ def test_switchover(harness, patroni):
             verify=True,
             auth=patroni._patroni_auth,
             timeout=PATRONI_TIMEOUT,
         )
+        # Test a candidate that is not a sync standby.
+        response = _post.return_value
+        response.status_code = 412
+        response.text = "candidate name does not match with sync_standby"
+        with pytest.raises(SwitchoverNotSyncError):
+            patroni.switchover("candidate")
+            assert False
+
 
 def test_member_replication_lag(harness, patroni):
     with (
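The switchover hunks above replace the try/except pattern with pytest.raises and add a third failure mode: Patroni answers 412 when the requested candidate is not the current sync standby, and the wrapper surfaces that as SwitchoverNotSyncError. As a hedged sketch only (the real handling lives in src/patroni.py and may differ in detail; the imports assume the charm's patroni module, as in the tests above), the response mapping these tests exercise looks roughly like:

    import requests

    from patroni import SwitchoverFailedError, SwitchoverNotSyncError

    def interpret_switchover_response(response: requests.Response) -> None:
        # 2xx: Patroni accepted the switchover request.
        if response.ok:
            return
        # 412 with this message: the candidate is not a sync standby.
        if response.status_code == 412 and "candidate name does not match with sync_standby" in response.text:
            raise SwitchoverNotSyncError(response.text)
        # Anything else (for example the 400 case asserted above) is a generic failure.
        raise SwitchoverFailedError(response.text)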
@@ -465,3 +469,29 @@ def test_last_postgresql_logs(harness, patroni):
     (root / "var" / "log" / "postgresql" / "postgresql.3.log").unlink()
     (root / "var" / "log" / "postgresql").rmdir()
     assert patroni.last_postgresql_logs() == ""
+
+
+def test_update_synchronous_node_count(harness, patroni):
+    with (
+        patch("patroni.stop_after_delay", return_value=stop_after_delay(0)) as _stop_after_delay,
+        patch("patroni.wait_fixed", return_value=wait_fixed(0)) as _wait_fixed,
+        patch("requests.patch") as _patch,
+    ):
+        response = _patch.return_value
+        response.status_code = 200
+
+        patroni.update_synchronous_node_count()
+
+        _patch.assert_called_once_with(
+            "http://postgresql-k8s-0:8008/config",
+            json={"synchronous_node_count": 0},
+            verify=True,
+            auth=patroni._patroni_auth,
+            timeout=10,
+        )
+
+        # Test when the request fails.
+        response.status_code = 500
+        with pytest.raises(RetryError):
+            patroni.update_synchronous_node_count()
+            assert False
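The new test pins down a retried PATCH of Patroni's /config endpoint: success on 200, RetryError once the retry window expires on 500. A minimal sketch of the behaviour it asserts, assuming a tenacity retry loop as the patched stop_after_delay/wait_fixed suggest (the method's real body lives in src/patroni.py; the base URL and the retry window and interval here are illustrative):

    import requests
    from tenacity import retry, stop_after_delay, wait_fixed

    @retry(stop=stop_after_delay(60), wait=wait_fixed(3))
    def update_synchronous_node_count(count: int = 0) -> None:
        response = requests.patch(
            "http://postgresql-k8s-0:8008/config",
            json={"synchronous_node_count": count},
            verify=True,
            timeout=10,
        )
        # A non-2xx answer raises, which makes tenacity retry and, once the
        # stop condition is reached, raise RetryError as the test expects.
        response.raise_for_status()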
diff --git a/tests/unit/test_postgresql.py b/tests/unit/test_postgresql.py
index ed6665d9d6..676c5709f8 100644
--- a/tests/unit/test_postgresql.py
+++ b/tests/unit/test_postgresql.py
@@ -5,6 +5,7 @@
 import psycopg2
 import pytest
 from charms.postgresql_k8s.v0.postgresql import (
+    PERMISSIONS_GROUP_ADMIN,
     PostgreSQLCreateDatabaseError,
     PostgreSQLGetLastArchivedWALError,
 )
@@ -12,20 +13,26 @@
 from psycopg2.sql import SQL, Composed, Identifier, Literal
 
 from charm import PostgresqlOperatorCharm
-from constants import PEER
+from constants import (
+    BACKUP_USER,
+    MONITORING_USER,
+    PEER,
+    REPLICATION_USER,
+    REWIND_USER,
+    USER,
+)
 
 
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
-
-        # Set up the initial relation and hooks.
-        peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
-        harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
-        harness.begin()
-        yield harness
-        harness.cleanup()
+    harness = Harness(PostgresqlOperatorCharm)
+
+    # Set up the initial relation and hooks.
+    peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
+    harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
+    harness.begin()
+    yield harness
+    harness.cleanup()
 
 
 def test_create_database(harness):
@@ -76,7 +83,7 @@ def test_create_database(harness):
                 SQL("GRANT ALL PRIVILEGES ON DATABASE "),
                 Identifier(database),
                 SQL(" TO "),
-                Identifier("admin"),
+                Identifier(PERMISSIONS_GROUP_ADMIN),
                 SQL(";"),
             ])
         ),
@@ -85,7 +92,7 @@ def test_create_database(harness):
                 SQL("GRANT ALL PRIVILEGES ON DATABASE "),
                 Identifier(database),
                 SQL(" TO "),
-                Identifier("backup"),
+                Identifier(BACKUP_USER),
                 SQL(";"),
             ])
         ),
@@ -94,7 +101,7 @@ def test_create_database(harness):
                 SQL("GRANT ALL PRIVILEGES ON DATABASE "),
                 Identifier(database),
                 SQL(" TO "),
-                Identifier("replication"),
+                Identifier(REPLICATION_USER),
                 SQL(";"),
             ])
         ),
@@ -103,7 +110,7 @@ def test_create_database(harness):
                 SQL("GRANT ALL PRIVILEGES ON DATABASE "),
                 Identifier(database),
                 SQL(" TO "),
-                Identifier("rewind"),
+                Identifier(REWIND_USER),
                 SQL(";"),
             ])
         ),
@@ -112,7 +119,7 @@ def test_create_database(harness):
                 SQL("GRANT ALL PRIVILEGES ON DATABASE "),
                 Identifier(database),
                 SQL(" TO "),
-                Identifier("operator"),
+                Identifier(USER),
                 SQL(";"),
             ])
        ),
@@ -121,7 +128,7 @@ def test_create_database(harness):
                 SQL("GRANT ALL PRIVILEGES ON DATABASE "),
                 Identifier(database),
                 SQL(" TO "),
-                Identifier("monitoring"),
+                Identifier(MONITORING_USER),
                 SQL(";"),
             ])
         ),
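Replacing the literal role names with the shared constants (PERMISSIONS_GROUP_ADMIN, BACKUP_USER, and so on) keeps these assertions in lockstep with the charm library. The asserted statement shape itself is plain psycopg2.sql composition, which quotes database and role names safely; a small self-contained illustration (the helper name is hypothetical, not the charm's API):

    from psycopg2.sql import SQL, Composed, Identifier

    def grant_all_on_database(database: str, role: str) -> Composed:
        # Identifier() quotes names, so unusual database or role names stay safe.
        return Composed([
            SQL("GRANT ALL PRIVILEGES ON DATABASE "),
            Identifier(database),
            SQL(" TO "),
            Identifier(role),
            SQL(";"),
        ])

    statement = grant_all_on_database("test_database", "monitoring")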
user = f"relation_id_{rel_id}" postgresql_mock.create_user.assert_called_once_with( - user, "test-password", extra_user_roles=EXTRA_USER_ROLES + user, + "test-password", + extra_user_roles=[role.lower() for role in EXTRA_USER_ROLES.split(",")], ) database_relation = harness.model.get_relation(RELATION_NAME) client_relations = [database_relation] postgresql_mock.create_database.assert_called_once_with( - DATABASE, user, plugins=["pgaudit"], client_relations=client_relations + DATABASE, + user, + plugins=["pgaudit"], + client_relations=client_relations, ) postgresql_mock.get_postgresql_version.assert_called_once() diff --git a/tests/unit/test_postgresql_tls.py b/tests/unit/test_postgresql_tls.py index e5c4a3f532..afccae24e8 100644 --- a/tests/unit/test_postgresql_tls.py +++ b/tests/unit/test_postgresql_tls.py @@ -17,15 +17,14 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - harness = Harness(PostgresqlOperatorCharm) - - # Set up the initial relation and hooks. - peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") - harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") - harness.begin() - yield harness - harness.cleanup() + harness = Harness(PostgresqlOperatorCharm) + + # Set up the initial relation and hooks. + peer_rel_id = harness.add_relation(PEER, "postgresql-k8s") + harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0") + harness.begin() + yield harness + harness.cleanup() def delete_secrets(_harness): diff --git a/tests/unit/test_rotate_logs.py b/tests/unit/test_rotate_logs.py index c5f66f1fce..8b579e8e21 100644 --- a/tests/unit/test_rotate_logs.py +++ b/tests/unit/test_rotate_logs.py @@ -3,7 +3,7 @@ import contextlib from unittest.mock import call, patch -from rotate_logs import main +from scripts.rotate_logs import main def test_main(): diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index 8035a6dd39..c25a54620f 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -20,23 +20,20 @@ @pytest.fixture(autouse=True) def harness(): - with patch("charm.KubernetesServicePatch", lambda x, y: None): - """Set up the test.""" - patcher = patch("lightkube.core.client.GenericSyncClient") - patcher.start() - harness = Harness(PostgresqlOperatorCharm) - harness.begin() - upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s") - peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s") - for rel_id in (upgrade_relation_id, peer_relation_id): - harness.add_relation_unit(rel_id, "postgresql-k8s/1") - harness.add_relation("restart", harness.charm.app.name) - with harness.hooks_disabled(): - harness.update_relation_data( - upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"} - ) - yield harness - harness.cleanup() + """Set up the test.""" + patcher = patch("lightkube.core.client.GenericSyncClient") + patcher.start() + harness = Harness(PostgresqlOperatorCharm) + harness.begin() + upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s") + peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s") + for rel_id in (upgrade_relation_id, peer_relation_id): + harness.add_relation_unit(rel_id, "postgresql-k8s/1") + harness.add_relation("restart", harness.charm.app.name) + with harness.hooks_disabled(): + harness.update_relation_data(upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"}) + yield harness + harness.cleanup() def test_is_no_sync_member(harness): @@ -158,6 +155,9 @@ def 
diff --git a/tests/unit/test_postgresql_tls.py b/tests/unit/test_postgresql_tls.py
index e5c4a3f532..afccae24e8 100644
--- a/tests/unit/test_postgresql_tls.py
+++ b/tests/unit/test_postgresql_tls.py
@@ -17,15 +17,14 @@
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        harness = Harness(PostgresqlOperatorCharm)
-
-        # Set up the initial relation and hooks.
-        peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
-        harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
-        harness.begin()
-        yield harness
-        harness.cleanup()
+    harness = Harness(PostgresqlOperatorCharm)
+
+    # Set up the initial relation and hooks.
+    peer_rel_id = harness.add_relation(PEER, "postgresql-k8s")
+    harness.add_relation_unit(peer_rel_id, "postgresql-k8s/0")
+    harness.begin()
+    yield harness
+    harness.cleanup()
 
 
 def delete_secrets(_harness):
diff --git a/tests/unit/test_rotate_logs.py b/tests/unit/test_rotate_logs.py
index c5f66f1fce..8b579e8e21 100644
--- a/tests/unit/test_rotate_logs.py
+++ b/tests/unit/test_rotate_logs.py
@@ -3,7 +3,7 @@
 import contextlib
 from unittest.mock import call, patch
 
-from rotate_logs import main
+from scripts.rotate_logs import main
 
 
 def test_main():
diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py
index 8035a6dd39..c25a54620f 100644
--- a/tests/unit/test_upgrade.py
+++ b/tests/unit/test_upgrade.py
@@ -20,23 +20,20 @@
 @pytest.fixture(autouse=True)
 def harness():
-    with patch("charm.KubernetesServicePatch", lambda x, y: None):
-        """Set up the test."""
-        patcher = patch("lightkube.core.client.GenericSyncClient")
-        patcher.start()
-        harness = Harness(PostgresqlOperatorCharm)
-        harness.begin()
-        upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s")
-        peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s")
-        for rel_id in (upgrade_relation_id, peer_relation_id):
-            harness.add_relation_unit(rel_id, "postgresql-k8s/1")
-        harness.add_relation("restart", harness.charm.app.name)
-        with harness.hooks_disabled():
-            harness.update_relation_data(
-                upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"}
-            )
-        yield harness
-        harness.cleanup()
+    """Set up the test."""
+    patcher = patch("lightkube.core.client.GenericSyncClient")
+    patcher.start()
+    harness = Harness(PostgresqlOperatorCharm)
+    harness.begin()
+    upgrade_relation_id = harness.add_relation("upgrade", "postgresql-k8s")
+    peer_relation_id = harness.add_relation("database-peers", "postgresql-k8s")
+    for rel_id in (upgrade_relation_id, peer_relation_id):
+        harness.add_relation_unit(rel_id, "postgresql-k8s/1")
+    harness.add_relation("restart", harness.charm.app.name)
+    with harness.hooks_disabled():
+        harness.update_relation_data(upgrade_relation_id, "postgresql-k8s/1", {"state": "idle"})
+    yield harness
+    harness.cleanup()
 
 
 def test_is_no_sync_member(harness):
@@ -158,6 +155,9 @@ def test_on_upgrade_changed(harness):
     with (
         patch("charm.PostgresqlOperatorCharm.update_config") as _update_config,
         patch("charm.Patroni.member_started", new_callable=PropertyMock) as _member_started,
+        patch(
+            "charm.PostgresqlOperatorCharm.updated_synchronous_node_count"
+        ) as _updated_synchronous_node_count,
     ):
         harness.set_can_connect(POSTGRESQL_CONTAINER, True)
         _member_started.return_value = False
@@ -168,6 +168,7 @@ def test_on_upgrade_changed(harness):
         _member_started.return_value = True
         harness.charm.on.upgrade_relation_changed.emit(relation)
         _update_config.assert_called_once()
+        _updated_synchronous_node_count.assert_called_once_with()
 
 
 def test_pre_upgrade_check(harness):
diff --git a/tox.ini b/tox.ini
index f61bf9cf28..6c23630de1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,9 +7,10 @@ env_list = lint, unit
 
 [vars]
 src_path = "{tox_root}/src"
+scripts_path = "{tox_root}/scripts"
 tests_path = "{tox_root}/tests"
 lib_path = "{tox_root}/lib/charms/postgresql_k8s"
-all_path = {[vars]src_path} {[vars]tests_path} {[vars]lib_path}
+all_path = {[vars]src_path} {[vars]scripts_path} {[vars]tests_path} {[vars]lib_path}
 
 [testenv]
 set_env =
@@ -48,7 +49,7 @@
 commands_pre =
     poetry install --only main,charm-libs,unit --no-root
 commands =
-    poetry run coverage run --source={[vars]src_path} \
+    poetry run coverage run --source={[vars]src_path},{[vars]scripts_path} \
         -m pytest -v --tb native -s {posargs} {[vars]tests_path}/unit
     poetry run coverage report
     poetry run coverage xml
 
@@ -57,8 +58,10 @@
 description = Run integration tests
 pass_env =
     CI
-    GITHUB_OUTPUT
-    SECRETS_FROM_GITHUB
+    AWS_ACCESS_KEY
+    AWS_SECRET_KEY
+    GCP_ACCESS_KEY
+    GCP_SECRET_KEY
 commands_pre =
     poetry install --only integration --no-root
 commands =