diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index b9dd27ad30..05c7ea52aa 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -52,45 +52,14 @@ jobs:
uses: canonical/data-platform-workflows/.github/workflows/build_charm.yaml@v29.1.0
integration-test:
- strategy:
- fail-fast: false
- matrix:
- juju:
- - agent: 2.9.51 # renovate: juju-agent-pin-minor
- libjuju: ==2.9.49.1 # renovate: latest libjuju 2
- allure_on_amd64: false
- - agent: 3.6.2 # renovate: juju-agent-pin-minor
- allure_on_amd64: true
- architecture:
- - amd64
- include:
- - juju:
- agent: 3.6.2 # renovate: juju-agent-pin-minor
- allure_on_amd64: true
- architecture: arm64
- name: Integration | ${{ matrix.juju.agent }} | ${{ matrix.architecture }}
+ name: Integration test charm
needs:
- lint
- unit-test
- build
- uses: canonical/data-platform-workflows/.github/workflows/integration_test_charm.yaml@v29.1.0
+ uses: ./.github/workflows/integration_test.yaml
with:
artifact-prefix: ${{ needs.build.outputs.artifact-prefix }}
- architecture: ${{ matrix.architecture }}
- cloud: lxd
- juju-agent-version: ${{ matrix.juju.agent }}
- libjuju-version-constraint: ${{ matrix.juju.libjuju }}
- _beta_allure_report: ${{ matrix.juju.allure_on_amd64 && matrix.architecture == 'amd64' }}
- secrets:
- integration-test: |
- {
- "AWS_ACCESS_KEY": "${{ secrets.AWS_ACCESS_KEY }}",
- "AWS_SECRET_KEY": "${{ secrets.AWS_SECRET_KEY }}",
- "GCP_ACCESS_KEY": "${{ secrets.GCP_ACCESS_KEY }}",
- "GCP_SECRET_KEY": "${{ secrets.GCP_SECRET_KEY }}",
- "UBUNTU_PRO_TOKEN" : "${{ secrets.UBUNTU_PRO_TOKEN }}",
- "LANDSCAPE_ACCOUNT_NAME": "${{ secrets.LANDSCAPE_ACCOUNT_NAME }}",
- "LANDSCAPE_REGISTRATION_KEY": "${{ secrets.LANDSCAPE_REGISTRATION_KEY }}",
- }
+ secrets: inherit
permissions:
- contents: write # Needed for Allure Report beta
+ contents: write # Needed for Allure Report
diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml
new file mode 100644
index 0000000000..a89194c525
--- /dev/null
+++ b/.github/workflows/integration_test.yaml
@@ -0,0 +1,316 @@
+on:
+ workflow_call:
+ inputs:
+ artifact-prefix:
+ description: |
+ Prefix for charm package GitHub artifact(s)
+
+ Use canonical/data-platform-workflows build_charm.yaml to build the charm(s)
+ required: true
+ type: string
+
+jobs:
+ collect-integration-tests:
+ name: Collect integration test spread jobs
+ runs-on: ubuntu-latest
+ timeout-minutes: 5
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ - name: Set up environment
+ run: |
+ sudo snap install charmcraft --classic
+ pipx install tox poetry
+ - name: Collect spread jobs
+ id: collect-jobs
+ shell: python
+ run: |
+ import json
+ import os
+ import subprocess
+
+ spread_jobs = (
+ subprocess.run(
+ ["charmcraft", "test", "--list", "github-ci"], capture_output=True, check=True, text=True
+ )
+ .stdout.strip()
+ .split("\n")
+ )
+ jobs = []
+ for job in spread_jobs:
+ # Example `job`: "github-ci:ubuntu-24.04:tests/spread/test_charm.py:juju36"
+ _, runner, task, variant = job.split(":")
+ # Example: "test_charm.py"
+ task = task.removeprefix("tests/spread/")
+ if runner.endswith("-arm"):
+ architecture = "arm64"
+ else:
+ architecture = "amd64"
+ # Example: "test_charm.py:juju36 | amd64"
+ name = f"{task}:{variant} | {architecture}"
+            # ":" character not valid in GitHub Actions artifact names
+ name_in_artifact = f"{task}-{variant}-{architecture}"
+ jobs.append({
+ "spread_job": job,
+ "name": name,
+ "name_in_artifact": name_in_artifact,
+ "runner": runner,
+ })
+ output = f"jobs={json.dumps(jobs)}"
+ print(output)
+ with open(os.environ["GITHUB_OUTPUT"], "a") as file:
+ file.write(output)
+ - name: Generate Allure default test results
+ if: ${{ github.event_name == 'schedule' && github.run_attempt == '1' }}
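+        # Writes a placeholder ("unknown") result for every collected test; the
+        # report job below swaps in actual results where they exist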
+ run: tox run -e integration -- tests/integration --allure-default-dir=allure-default-results
+ - name: Upload Allure default results
+ # Default test results in case the integration tests time out or runner set up fails
+ # (So that Allure report will show "unknown"/"failed" test result, instead of omitting the test)
+ if: ${{ github.event_name == 'schedule' && github.run_attempt == '1' }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: allure-default-results-integration-test
+ path: allure-default-results/
+ if-no-files-found: error
+ outputs:
+ jobs: ${{ steps.collect-jobs.outputs.jobs }}
+
+ integration-test:
+ strategy:
+ fail-fast: false
+ matrix:
+ job: ${{ fromJSON(needs.collect-integration-tests.outputs.jobs) }}
+ name: ${{ matrix.job.name }}
+ needs:
+ - collect-integration-tests
+ runs-on: ${{ matrix.job.runner }}
+ timeout-minutes: 217 # Sum of steps `timeout-minutes` + 5
+ steps:
+ - name: Free up disk space
+ timeout-minutes: 1
+ run: |
+ printf '\nDisk usage before cleanup\n'
+ df --human-readable
+ # Based on https://github.com/actions/runner-images/issues/2840#issuecomment-790492173
+ rm -r /opt/hostedtoolcache/
+ printf '\nDisk usage after cleanup\n'
+ df --human-readable
+ - name: Checkout
+ timeout-minutes: 3
+ uses: actions/checkout@v4
+ - name: Set up environment
+ timeout-minutes: 5
+ run: sudo snap install charmcraft --classic
+ # TODO: remove when https://github.com/canonical/charmcraft/issues/2105 and
+ # https://github.com/canonical/charmcraft/issues/2130 fixed
+ - run: |
+ sudo snap install go --classic
+ go install github.com/snapcore/spread/cmd/spread@latest
+ - name: Download packed charm(s)
+ timeout-minutes: 5
+ uses: actions/download-artifact@v4
+ with:
+ pattern: ${{ inputs.artifact-prefix }}-*
+ merge-multiple: true
+ - name: Run spread job
+ timeout-minutes: 180
+ id: spread
+ # TODO: replace with `charmcraft test` when
+ # https://github.com/canonical/charmcraft/issues/2105 and
+ # https://github.com/canonical/charmcraft/issues/2130 fixed
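+        # `-artifacts` copies files saved by the spread task (e.g. Allure results)
+        # into ./artifacts/ for the upload steps below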
+ run: ~/go/bin/spread -vv -artifacts=artifacts '${{ matrix.job.spread_job }}'
+ env:
+ AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }}
+ AWS_SECRET_KEY: ${{ secrets.AWS_SECRET_KEY }}
+ GCP_ACCESS_KEY: ${{ secrets.GCP_ACCESS_KEY }}
+ GCP_SECRET_KEY: ${{ secrets.GCP_SECRET_KEY }}
+ UBUNTU_PRO_TOKEN: ${{ secrets.UBUNTU_PRO_TOKEN }}
+ LANDSCAPE_ACCOUNT_NAME: ${{ secrets.LANDSCAPE_ACCOUNT_NAME }}
+ LANDSCAPE_REGISTRATION_KEY: ${{ secrets.LANDSCAPE_REGISTRATION_KEY }}
+ - name: Upload Allure results
+ timeout-minutes: 3
+ # Only upload results from one spread system & one spread variant
+ # Allure can only process one result per pytest test ID. If parameterization is done via
+ # spread instead of pytest, there will be overlapping pytest test IDs.
+ if: ${{ (success() || (failure() && steps.spread.outcome == 'failure')) && startsWith(matrix.job.spread_job, 'github-ci:ubuntu-24.04:') && endsWith(matrix.job.spread_job, ':juju36') && github.event_name == 'schedule' && github.run_attempt == '1' }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: allure-results-integration-test-${{ matrix.job.name_in_artifact }}
+ path: artifacts/${{ matrix.job.spread_job }}/allure-results/
+ if-no-files-found: error
+ - timeout-minutes: 1
+ if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }}
+ run: snap list
+ - name: Select model
+ timeout-minutes: 1
+ # `!contains(matrix.job.spread_job, 'juju29')` workaround for juju 2 error:
+ # "ERROR cannot acquire lock file to read controller concierge-microk8s: unable to open
+ # /tmp/juju-store-lock-3635383939333230: permission denied"
+ # Unable to workaround error with `sudo rm /tmp/juju-*`
+ if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+ id: juju-switch
+ run: |
+ # sudo needed since spread runs scripts as root
+ # "testing" is default model created by concierge
+ sudo juju switch testing
+ mkdir ~/logs/
+ - name: juju status
+ timeout-minutes: 1
+ if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+ run: sudo juju status --color --relations | tee ~/logs/juju-status.txt
+ - name: juju debug-log
+ timeout-minutes: 3
+ if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+ run: sudo juju debug-log --color --replay --no-tail | tee ~/logs/juju-debug-log.txt
+ - name: jhack tail
+ timeout-minutes: 3
+ if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+ run: sudo jhack tail --printer raw --replay --no-watch | tee ~/logs/jhack-tail.txt
+ - name: Upload logs
+ timeout-minutes: 5
+ if: ${{ !contains(matrix.job.spread_job, 'juju29') && (success() || (failure() && steps.spread.outcome == 'failure')) }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: logs-integration-test-${{ matrix.job.name_in_artifact }}
+ path: ~/logs/
+ if-no-files-found: error
+ - name: Disk usage
+ timeout-minutes: 1
+ if: ${{ success() || (failure() && steps.spread.outcome == 'failure') }}
+ run: df --human-readable
+
+ allure-report:
+ # TODO future improvement: use concurrency group for job
+ name: Publish Allure report
+ if: ${{ !cancelled() && github.event_name == 'schedule' && github.run_attempt == '1' }}
+ needs:
+ - integration-test
+ runs-on: ubuntu-latest
+ timeout-minutes: 5
+ steps:
+ - name: Download Allure
+ # Following instructions from https://allurereport.org/docs/install-for-linux/#install-from-a-deb-package
+ run: gh release download --repo allure-framework/allure2 --pattern 'allure_*.deb'
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ - name: Install Allure
+ run: |
+ sudo apt-get update
+ sudo apt-get install ./allure_*.deb -y
+ # For first run, manually create branch with no history
+ # (e.g.
+ # git checkout --orphan gh-pages-beta
+ # git rm -rf .
+ # touch .nojekyll
+ # git add .nojekyll
+ # git commit -m "Initial commit"
+ # git push origin gh-pages-beta
+ # )
+ - name: Checkout GitHub pages branch
+ uses: actions/checkout@v4
+ with:
+ ref: gh-pages-beta
+ path: repo/
+ - name: Download default test results
+ # Default test results in case the integration tests time out or runner set up fails
+ # (So that Allure report will show "unknown"/"failed" test result, instead of omitting the test)
+ uses: actions/download-artifact@v4
+ with:
+ path: allure-default-results/
+ name: allure-default-results-integration-test
+ - name: Download test results
+ uses: actions/download-artifact@v4
+ with:
+ path: allure-results/
+ pattern: allure-results-integration-test-*
+ merge-multiple: true
+ - name: Combine Allure default results & actual results
+ # For every test: if actual result available, use that. Otherwise, use default result
+ # So that, if actual result not available, Allure report will show "unknown"/"failed" test result
+ # instead of omitting the test
+ shell: python
+ run: |
+ import dataclasses
+ import json
+ import pathlib
+
+
+ @dataclasses.dataclass(frozen=True)
+ class Result:
+ test_case_id: str
+ path: pathlib.Path
+
+              def __eq__(self, other):
+                  if not isinstance(other, type(self)):
+                      return False
+                  return self.test_case_id == other.test_case_id
+
+              def __hash__(self):
+                  # Hash on the test case ID only, matching __eq__ (with
+                  # frozen=True, dataclasses would otherwise generate a hash
+                  # over all fields, breaking the set difference below)
+                  return hash(self.test_case_id)
+
+
+ actual_results = pathlib.Path("allure-results")
+ default_results = pathlib.Path("allure-default-results")
+
+ results: dict[pathlib.Path, set[Result]] = {
+ actual_results: set(),
+ default_results: set(),
+ }
+ for directory, results_ in results.items():
+ for path in directory.glob("*-result.json"):
+ with path.open("r") as file:
+ id_ = json.load(file)["testCaseId"]
+ results_.add(Result(id_, path))
+
+ actual_results.mkdir(exist_ok=True)
+
+ missing_results = results[default_results] - results[actual_results]
+ for default_result in missing_results:
+ # Move to `actual_results` directory
+ default_result.path.rename(actual_results / default_result.path.name)
+ - name: Load test report history
+ run: |
+ if [[ -d repo/_latest/history/ ]]
+ then
+ echo 'Loading history'
+ cp -r repo/_latest/history/ allure-results/
+ fi
+ - name: Create executor.json
+ shell: python
+ run: |
+ # Reverse engineered from https://github.com/simple-elf/allure-report-action/blob/eca283b643d577c69b8e4f048dd6cd8eb8457cfd/entrypoint.sh
+ import json
+
+ DATA = {
+ "name": "GitHub Actions",
+ "type": "github",
+ "buildOrder": ${{ github.run_number }}, # TODO future improvement: use run ID
+ "buildName": "Run ${{ github.run_id }}",
+ "buildUrl": "https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}",
+ "reportUrl": "../${{ github.run_number }}/",
+ }
+ with open("allure-results/executor.json", "w") as file:
+ json.dump(DATA, file)
+ - name: Generate Allure report
+ run: allure generate
+ - name: Create index.html
+ shell: python
+ run: |
+          # Redirect to the latest Allure report
+          DATA = f"""<!DOCTYPE html>
+          <meta http-equiv="refresh" content="0; url=./_latest/">
+          """
+ with open("repo/index.html", "w") as file:
+ file.write(DATA)
+ - name: Update GitHub pages branch
+ working-directory: repo/
+ # TODO future improvement: commit message
+ run: |
+ mkdir '${{ github.run_number }}'
+ rm -f _latest
+ ln -s '${{ github.run_number }}' _latest
+ cp -r ../allure-report/. _latest/
+ git add .
+ git config user.name "GitHub Actions"
+ git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
+ git commit -m "Allure report ${{ github.run_number }}"
+ # Uses token set in checkout step
+ git push origin gh-pages-beta
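
For reference, the name mapping done by the collect step at the top of this workflow can be reproduced in isolation (a minimal sketch using the example identifier from the script's comments):

```python
# Sketch of the collect step's name mapping, assuming the example spread
# identifier "github-ci:ubuntu-24.04:tests/spread/test_charm.py:juju36"
job = "github-ci:ubuntu-24.04:tests/spread/test_charm.py:juju36"
_, runner, task, variant = job.split(":")
task = task.removeprefix("tests/spread/")  # -> "test_charm.py"
architecture = "arm64" if runner.endswith("-arm") else "amd64"
print(f"{task}:{variant} | {architecture}")  # display name: "test_charm.py:juju36 | amd64"
print(f"{task}-{variant}-{architecture}")    # artifact-safe name (no ":")
```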
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index deefea45b2..b3de155ce1 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -21,7 +21,7 @@ jobs:
uses: ./.github/workflows/ci.yaml
secrets: inherit
permissions:
- contents: write # Needed for Allure Report beta
+ contents: write # Needed for Allure Report
release:
name: Release charm
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2e22c8c702..388378cdff 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -33,7 +33,7 @@ source venv/bin/activate
tox run -e format # update your code according to linting rules
tox run -e lint # code style
tox run -e unit # unit tests
-tox run -e integration # integration tests
+charmcraft test lxd-vm: # integration tests
tox # runs 'lint' and 'unit' environments
```
diff --git a/concierge.yaml b/concierge.yaml
new file mode 100644
index 0000000000..15a78cc947
--- /dev/null
+++ b/concierge.yaml
@@ -0,0 +1,13 @@
+juju:
+ model-defaults:
+ logging-config: =INFO; unit=DEBUG
+providers:
+ lxd:
+ enable: true
+ bootstrap: true
+host:
+ snaps:
+ jhack:
+ channel: latest/edge
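+      # Grant jhack access to the Juju client's data (~/.local/share/juju)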
+ connections:
+ - jhack:dot-local-share-juju snapd
diff --git a/poetry.lock b/poetry.lock
index c3e2e6d8ba..a951426a86 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand.
[[package]]
name = "allure-pytest"
@@ -17,26 +17,21 @@ allure-python-commons = "2.13.5"
pytest = ">=4.5.0"
[[package]]
-name = "allure-pytest-collection-report"
-version = "0.1.0"
-description = ""
+name = "allure-pytest-default-results"
+version = "0.1.2"
+description = "Generate default \"unknown\" results to show in Allure Report if test case does not run"
optional = false
python-versions = ">=3.8"
groups = ["integration"]
-files = []
-develop = false
+files = [
+ {file = "allure_pytest_default_results-0.1.2-py3-none-any.whl", hash = "sha256:8dc6c5a5d548661c38111a2890509e794204586fa81cefbe61315fb63996e50c"},
+ {file = "allure_pytest_default_results-0.1.2.tar.gz", hash = "sha256:eb6c16aa1c2ede69e653a0ee38094791685eaacb0ac6b2cae5c6da1379dbdbfd"},
+]
[package.dependencies]
allure-pytest = ">=2.13.5"
pytest = "*"
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v29.1.0"
-resolved_reference = "cf3e292107a8d420c452e35cf7552c225add7fbd"
-subdirectory = "python/pytest_plugins/allure_pytest_collection_report"
-
[[package]]
name = "allure-python-commons"
version = "2.13.5"
@@ -566,6 +561,7 @@ files = [
{file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb"},
{file = "cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b"},
{file = "cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543"},
+ {file = "cryptography-44.0.0-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:60eb32934076fa07e4316b7b2742fa52cbb190b42c2df2863dbc4230a0a9b385"},
{file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e"},
{file = "cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e"},
{file = "cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053"},
@@ -576,6 +572,7 @@ files = [
{file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289"},
{file = "cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7"},
{file = "cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c"},
+ {file = "cryptography-44.0.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:9abcc2e083cbe8dde89124a47e5e53ec38751f0d7dfd36801008f316a127d7ba"},
{file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64"},
{file = "cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285"},
{file = "cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417"},
@@ -1555,7 +1552,6 @@ files = [
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
- {file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
@@ -1616,7 +1612,6 @@ files = [
{file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bb89f0a835bcfc1d42ccd5f41f04870c1b936d8507c6df12b7737febc40f0909"},
{file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f0c2d907a1e102526dd2986df638343388b94c33860ff3bbe1384130828714b1"},
{file = "psycopg2_binary-2.9.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f8157bed2f51db683f31306aa497311b560f2265998122abe1dce6428bd86567"},
- {file = "psycopg2_binary-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:27422aa5f11fbcd9b18da48373eb67081243662f9b46e6fd07c3eb46e4535142"},
{file = "psycopg2_binary-2.9.10-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:eb09aa7f9cecb45027683bb55aebaaf45a0df8bf6de68801a6afdc7947bb09d4"},
{file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b73d6d7f0ccdad7bc43e6d34273f70d587ef62f824d7261c4ae9b8b1b6af90e8"},
{file = "psycopg2_binary-2.9.10-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce5ab4bf46a211a8e924d307c1b1fcda82368586a19d0a24f8ae166f5c784864"},
@@ -1900,23 +1895,6 @@ pytest = ">=7.0.0"
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
-[[package]]
-name = "pytest-github-secrets"
-version = "0.1.0"
-description = ""
-optional = false
-python-versions = ">=3.8"
-groups = ["integration"]
-files = []
-develop = false
-
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v29.1.0"
-resolved_reference = "cf3e292107a8d420c452e35cf7552c225add7fbd"
-subdirectory = "python/pytest_plugins/github_secrets"
-
[[package]]
name = "pytest-operator"
version = "0.39.0"
@@ -1937,46 +1915,6 @@ pytest = "*"
pytest-asyncio = "<0.23"
pyyaml = "*"
-[[package]]
-name = "pytest-operator-cache"
-version = "0.1.0"
-description = ""
-optional = false
-python-versions = ">=3.8"
-groups = ["integration"]
-files = []
-develop = false
-
-[package.dependencies]
-pyyaml = "*"
-
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v29.1.0"
-resolved_reference = "cf3e292107a8d420c452e35cf7552c225add7fbd"
-subdirectory = "python/pytest_plugins/pytest_operator_cache"
-
-[[package]]
-name = "pytest-operator-groups"
-version = "0.1.0"
-description = ""
-optional = false
-python-versions = ">=3.8"
-groups = ["integration"]
-files = []
-develop = false
-
-[package.dependencies]
-pytest = "*"
-
-[package.source]
-type = "git"
-url = "https://github.com/canonical/data-platform-workflows"
-reference = "v29.1.0"
-resolved_reference = "cf3e292107a8d420c452e35cf7552c225add7fbd"
-subdirectory = "python/pytest_plugins/pytest_operator_groups"
-
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -2697,4 +2635,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = "^3.10"
-content-hash = "b89b458ed0f91e834c027be0d2541d464e97d65aaf56b0454c1ed07b004828e1"
+content-hash = "2bc4a893d47cdea828762f430354381eeea5e1ef3685f83302c33299117a439d"
diff --git a/pyproject.toml b/pyproject.toml
index 11a4e50151..fee816917b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -61,10 +61,7 @@ optional = true
[tool.poetry.group.integration.dependencies]
pytest = "^8.3.4"
-pytest-github-secrets = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.1.0", subdirectory = "python/pytest_plugins/github_secrets"}
pytest-operator = "^0.39.0"
-pytest-operator-cache = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.1.0", subdirectory = "python/pytest_plugins/pytest_operator_cache"}
-pytest-operator-groups = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.1.0", subdirectory = "python/pytest_plugins/pytest_operator_groups"}
# renovate caret doesn't work: https://github.com/renovatebot/renovate/issues/26940
juju = "<=3.6.1.0"
boto3 = "*"
@@ -73,7 +70,7 @@ landscape-api-py3 = "^0.9.0"
mailmanclient = "^3.3.5"
psycopg2-binary = "^2.9.10"
allure-pytest = "^2.13.5"
-allure-pytest-collection-report = {git = "https://github.com/canonical/data-platform-workflows", tag = "v29.1.0", subdirectory = "python/pytest_plugins/allure_pytest_collection_report"}
+allure-pytest-default-results = "^0.1.2"
# Testing tools configuration
[tool.coverage.run]
@@ -89,7 +86,7 @@ exclude_lines = [
minversion = "6.0"
log_cli_level = "INFO"
asyncio_mode = "auto"
-markers = ["unstable", "juju2", "juju3", "juju_secrets"]
+markers = ["juju2", "juju3", "juju_secrets"]
# Formatting tools configuration
[tool.black]
diff --git a/spread.yaml b/spread.yaml
new file mode 100644
index 0000000000..2b435a5885
--- /dev/null
+++ b/spread.yaml
@@ -0,0 +1,137 @@
+project: postgresql-operator
+
+backends:
+ # Derived from https://github.com/jnsgruk/zinc-k8s-operator/blob/a21eae8399eb3b9df4ddb934b837af25ef831976/spread.yaml#L11
+ lxd-vm:
+ # TODO: remove after https://github.com/canonical/spread/pull/185 merged & in charmcraft
+ type: adhoc
+ allocate: |
+ hash=$(python3 -c "import hashlib; print(hashlib.sha256('$SPREAD_PASSWORD'.encode()).hexdigest()[:6])")
+ VM_NAME="${VM_NAME:-${SPREAD_SYSTEM//./-}-${hash}}"
+ DISK="${DISK:-20}"
+ CPU="${CPU:-4}"
+ MEM="${MEM:-8}"
+
+ cloud_config="#cloud-config
+ ssh_pwauth: true
+ users:
+ - default
+ - name: runner
+ plain_text_passwd: $SPREAD_PASSWORD
+ lock_passwd: false
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ "
+
+ lxc launch --vm \
+ "${SPREAD_SYSTEM//-/:}" \
+ "${VM_NAME}" \
+ -c user.user-data="${cloud_config}" \
+ -c limits.cpu="${CPU}" \
+ -c limits.memory="${MEM}GiB" \
+ -d root,size="${DISK}GiB"
+
+ # Wait for the runner user
+ while ! lxc exec "${VM_NAME}" -- id -u runner &>/dev/null; do sleep 0.5; done
+
+ # Set the instance address for spread
+ ADDRESS "$(lxc ls -f csv | grep "${VM_NAME}" | cut -d"," -f3 | cut -d" " -f1)"
+ discard: |
+ hash=$(python3 -c "import hashlib; print(hashlib.sha256('$SPREAD_PASSWORD'.encode()).hexdigest()[:6])")
+ VM_NAME="${VM_NAME:-${SPREAD_SYSTEM//./-}-${hash}}"
+ lxc delete --force "${VM_NAME}"
+ environment:
+ CONCIERGE_EXTRA_SNAPS: charmcraft
+ CONCIERGE_EXTRA_DEBS: pipx
+ systems:
+ - ubuntu-24.04:
+ username: runner
+ prepare: |
+ systemctl disable --now unattended-upgrades.service
+ systemctl mask unattended-upgrades.service
+ pipx install charmcraftcache
+ cd "$SPREAD_PATH"
+ charmcraftcache pack -v
+ restore-each: |
+ cd "$SPREAD_PATH"
+ # Revert python-libjuju version override
+ git restore pyproject.toml poetry.lock
+
+ # Use instead of `concierge restore` to save time between tests
+ # For example, with microk8s, using `concierge restore` takes twice as long as this (e.g. 6
+ # min instead of 3 min between every spread job)
+ juju destroy-model --force --no-wait --destroy-storage --no-prompt testing
+ juju kill-controller --no-prompt concierge-lxd
+ restore: |
+ rm -rf "$SPREAD_PATH"
+
+ github-ci:
+ type: adhoc
+ # Only run on CI
+ manual: true
+ # HACK: spread requires runners to be accessible via SSH
+ # Configure local sshd & instruct spread to connect to the same machine spread is running on
+ # (spread cannot provision GitHub Actions runners, so we provision a GitHub Actions runner for
+ # each spread job & select a single job when running spread)
+ # Derived from https://github.com/jnsgruk/zinc-k8s-operator/blob/a21eae8399eb3b9df4ddb934b837af25ef831976/spread.yaml#L47
+ allocate: |
+ sudo tee /etc/ssh/sshd_config.d/10-spread-github-ci.conf << 'EOF'
+ PasswordAuthentication yes
+ PermitEmptyPasswords yes
+ EOF
+
+ ADDRESS localhost
+ # HACK: spread does not pass environment variables set on runner
+ # Manually pass specific environment variables
+ environment:
+ CI: '$(HOST: echo $CI)'
+ AWS_ACCESS_KEY: '$(HOST: echo $AWS_ACCESS_KEY)'
+ AWS_SECRET_KEY: '$(HOST: echo $AWS_SECRET_KEY)'
+ GCP_ACCESS_KEY: '$(HOST: echo $GCP_ACCESS_KEY)'
+ GCP_SECRET_KEY: '$(HOST: echo $GCP_SECRET_KEY)'
+ UBUNTU_PRO_TOKEN: '$(HOST: echo $UBUNTU_PRO_TOKEN)'
+ LANDSCAPE_ACCOUNT_NAME: '$(HOST: echo $LANDSCAPE_ACCOUNT_NAME)'
+ LANDSCAPE_REGISTRATION_KEY: '$(HOST: echo $LANDSCAPE_REGISTRATION_KEY)'
+ systems:
+ - ubuntu-24.04:
+ username: runner
+ - ubuntu-24.04-arm:
+ username: runner
+ variants:
+ - -juju29
+
+suites:
+ tests/spread/:
+ summary: Spread tests
+
+path: /root/spread_project
+
+kill-timeout: 3h
+environment:
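+  # Expose pipx-installed tools (tox, poetry, charmcraftcache) on PATH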
+ PATH: $PATH:$(pipx environment --value PIPX_BIN_DIR)
+ CONCIERGE_JUJU_CHANNEL/juju36: 3.6/stable
+ CONCIERGE_JUJU_CHANNEL/juju29: 2.9/stable
+prepare: |
+ snap refresh --hold
+ chown -R root:root "$SPREAD_PATH"
+ cd "$SPREAD_PATH"
+ snap install --classic concierge
+
+ # Install charmcraft & pipx (on lxd-vm backend)
+ concierge prepare --trace
+
+ pipx install tox poetry
+prepare-each: |
+ cd "$SPREAD_PATH"
+ if [[ $SPREAD_VARIANT == *"juju29"* ]]
+ then
+ # Each version of python-libjuju is only compatible with one major Juju version
+ # Override python-libjuju version pinned in poetry.lock
+ poetry add --lock --group integration juju@^2
+ fi
+ # `concierge prepare` needs to be run for each spread job in case Juju version changed
+ concierge prepare --trace
+
+ # Unable to set constraint on all models because of Juju bug:
+ # https://bugs.launchpad.net/juju/+bug/2065050
+ juju set-model-constraints arch="$(dpkg --print-architecture)"
+# Only restore on the lxd-vm backend; no need to restore on CI
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 87bd24fb9b..bdce9d8e13 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -1,12 +1,13 @@
-#!/usr/bin/env python3
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
import pytest
-from pytest_operator.plugin import OpsTest
+from . import architecture
-@pytest.fixture(scope="module")
-async def charm(ops_test: OpsTest):
- """Build the charm-under-test."""
- # Build charm from local source folder.
- yield await ops_test.build_charm(".")
+
+@pytest.fixture(scope="session")
+def charm():
+ # Return str instead of pathlib.Path since python-libjuju's model.deploy(), juju deploy, and
+ # juju bundle files expect local charms to begin with `./` or `/` to distinguish them from
+ # Charmhub charms.
+ return f"./postgresql_ubuntu@22.04-{architecture.architecture}.charm"
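
The `architecture` module imported above is not part of this diff; a plausible minimal implementation, shown purely as an assumption about what it provides:

```python
# Hypothetical tests/integration/architecture.py (not shown in this diff):
# resolve the host machine type to Juju's architecture naming.
import platform

architecture = {"x86_64": "amd64", "aarch64": "arm64"}[platform.machine()]
```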
diff --git a/tests/integration/ha_tests/test_async_replication.py b/tests/integration/ha_tests/test_async_replication.py
index 7b76660bb3..588486471f 100644
--- a/tests/integration/ha_tests/test_async_replication.py
+++ b/tests/integration/ha_tests/test_async_replication.py
@@ -99,7 +99,6 @@ async def second_model_continuous_writes(second_model) -> None:
assert action.results["result"] == "True", "Unable to clear up continuous_writes table"
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_deploy_async_replication_setup(
@@ -107,7 +106,6 @@ async def test_deploy_async_replication_setup(
) -> None:
"""Build and deploy two PostgreSQL cluster in two separate models to test async replication."""
if not await app_name(ops_test):
- charm = await ops_test.build_charm(".")
await ops_test.model.deploy(
charm,
num_units=CLUSTER_SIZE,
@@ -122,7 +120,6 @@ async def test_deploy_async_replication_setup(
)
await ops_test.model.relate(DATABASE_APP_NAME, DATA_INTEGRATOR_APP_NAME)
if not await app_name(ops_test, model=second_model):
- charm = await ops_test.build_charm(".")
await second_model.deploy(
charm,
num_units=CLUSTER_SIZE,
@@ -146,7 +143,6 @@ async def test_deploy_async_replication_setup(
)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_async_replication(
@@ -224,7 +220,6 @@ async def test_async_replication(
await check_writes(ops_test, extra_model=second_model)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_get_data_integrator_credentials(
@@ -237,7 +232,6 @@ async def test_get_data_integrator_credentials(
data_integrator_credentials = result.results
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_switchover(
@@ -292,7 +286,6 @@ async def test_switchover(
await are_writes_increasing(ops_test, extra_model=second_model)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_data_integrator_creds_keep_on_working(
@@ -315,7 +308,6 @@ async def test_data_integrator_creds_keep_on_working(
connection.close()
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_promote_standby(
@@ -393,7 +385,6 @@ async def test_promote_standby(
await are_writes_increasing(ops_test)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_reestablish_relation(
@@ -451,7 +442,6 @@ async def test_reestablish_relation(
await check_writes(ops_test, extra_model=second_model)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_async_replication_failover_in_main_cluster(
@@ -497,7 +487,6 @@ async def test_async_replication_failover_in_main_cluster(
await check_writes(ops_test, extra_model=second_model)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_async_replication_failover_in_secondary_cluster(
@@ -534,7 +523,6 @@ async def test_async_replication_failover_in_secondary_cluster(
await check_writes(ops_test, extra_model=second_model)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_scaling(
diff --git a/tests/integration/ha_tests/test_replication.py b/tests/integration/ha_tests/test_replication.py
index dd924948da..b1c8da31a4 100644
--- a/tests/integration/ha_tests/test_replication.py
+++ b/tests/integration/ha_tests/test_replication.py
@@ -18,16 +18,14 @@
)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
"""Build and deploy three unit of PostgreSQL."""
wait_for_apps = False
# It is possible for users to provide their own cluster for HA testing. Hence, check if there
# is a pre-existing cluster.
if not await app_name(ops_test):
wait_for_apps = True
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
await ops_test.model.deploy(
charm,
@@ -51,7 +49,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
await ops_test.model.wait_for_idle(status="active", timeout=1500)
-@pytest.mark.group(1)
async def test_reelection(ops_test: OpsTest, continuous_writes, primary_start_timeout) -> None:
"""Kill primary unit, check reelection."""
app = await app_name(ops_test)
@@ -89,7 +86,6 @@ async def test_reelection(ops_test: OpsTest, continuous_writes, primary_start_ti
await check_writes(ops_test)
-@pytest.mark.group(1)
async def test_consistency(ops_test: OpsTest, continuous_writes) -> None:
"""Write to primary, read data from secondaries (check consistency)."""
# Locate primary unit.
@@ -106,8 +102,9 @@ async def test_consistency(ops_test: OpsTest, continuous_writes) -> None:
await check_writes(ops_test)
-@pytest.mark.group(1)
-async def test_no_data_replicated_between_clusters(ops_test: OpsTest, continuous_writes) -> None:
+async def test_no_data_replicated_between_clusters(
+ ops_test: OpsTest, charm, continuous_writes
+) -> None:
"""Check that writes in one cluster are not replicated to another cluster."""
# Locate primary unit.
app = await app_name(ops_test)
@@ -116,7 +113,6 @@ async def test_no_data_replicated_between_clusters(ops_test: OpsTest, continuous
# Deploy another cluster.
new_cluster_app = f"second-{app}"
if not await app_name(ops_test, new_cluster_app):
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
await ops_test.model.deploy(
charm,
diff --git a/tests/integration/ha_tests/test_restore_cluster.py b/tests/integration/ha_tests/test_restore_cluster.py
index 9542dbb850..8a26b15cb5 100644
--- a/tests/integration/ha_tests/test_restore_cluster.py
+++ b/tests/integration/ha_tests/test_restore_cluster.py
@@ -29,12 +29,10 @@
charm = None
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
"""Build and deploy two PostgreSQL clusters."""
# This is a potentially destructive test, so it shouldn't be run against existing clusters
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
# Deploy the first cluster with reusable storage
await ops_test.model.deploy(
@@ -68,7 +66,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
await ops_test.model.destroy_unit(second_primary)
-@pytest.mark.group(1)
async def test_cluster_restore(ops_test):
"""Recreates the cluster from storage volumes."""
# Write some data.
diff --git a/tests/integration/ha_tests/test_scaling.py b/tests/integration/ha_tests/test_scaling.py
index f3105d38cd..6e43d1f83a 100644
--- a/tests/integration/ha_tests/test_scaling.py
+++ b/tests/integration/ha_tests/test_scaling.py
@@ -27,13 +27,11 @@
charm = None
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
"""Build and deploy two PostgreSQL clusters."""
# This is a potentially destructive test, so it shouldn't be run against existing clusters
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
# Deploy the first cluster with reusable storage
await gather(
@@ -55,7 +53,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
await ops_test.model.wait_for_idle(status="active", timeout=1500)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_removing_stereo_primary(ops_test: OpsTest, continuous_writes) -> None:
@@ -105,7 +102,6 @@ async def test_removing_stereo_primary(ops_test: OpsTest, continuous_writes) ->
await check_writes(ops_test)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_removing_stereo_sync_standby(ops_test: OpsTest, continuous_writes) -> None:
@@ -140,7 +136,6 @@ async def test_removing_stereo_sync_standby(ops_test: OpsTest, continuous_writes
await check_writes(ops_test)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_scale_to_five_units(ops_test: OpsTest) -> None:
@@ -148,7 +143,6 @@ async def test_scale_to_five_units(ops_test: OpsTest) -> None:
await ops_test.model.wait_for_idle(status="active", timeout=1500)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_removing_raft_majority(ops_test: OpsTest, continuous_writes) -> None:
@@ -206,7 +200,6 @@ async def test_removing_raft_majority(ops_test: OpsTest, continuous_writes) -> N
assert new_roles["primaries"][0] == original_roles["sync_standbys"][1]
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
async def test_removing_raft_majority_async(ops_test: OpsTest, continuous_writes) -> None:
diff --git a/tests/integration/ha_tests/test_scaling_three_units.py b/tests/integration/ha_tests/test_scaling_three_units.py
index 6817cd238a..74a1e8ba4b 100644
--- a/tests/integration/ha_tests/test_scaling_three_units.py
+++ b/tests/integration/ha_tests/test_scaling_three_units.py
@@ -28,13 +28,11 @@
charm = None
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
"""Build and deploy two PostgreSQL clusters."""
# This is a potentially destructive test, so it shouldn't be run against existing clusters
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
# Deploy the first cluster with reusable storage
await gather(
@@ -56,7 +54,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
await ops_test.model.wait_for_idle(status="active", timeout=1500)
-@pytest.mark.group(1)
@markers.juju3
@pytest.mark.parametrize(
"roles",
diff --git a/tests/integration/ha_tests/test_self_healing.py b/tests/integration/ha_tests/test_self_healing.py
index 12b61a4fd7..f3ddc6fe88 100644
--- a/tests/integration/ha_tests/test_self_healing.py
+++ b/tests/integration/ha_tests/test_self_healing.py
@@ -62,16 +62,14 @@
MEDIAN_ELECTION_TIME = 10
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_build_and_deploy(ops_test: OpsTest) -> None:
+async def test_build_and_deploy(ops_test: OpsTest, charm) -> None:
"""Build and deploy three unit of PostgreSQL."""
wait_for_apps = False
# It is possible for users to provide their own cluster for HA testing. Hence, check if there
# is a pre-existing cluster.
if not await app_name(ops_test):
wait_for_apps = True
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
await ops_test.model.deploy(
charm,
@@ -96,7 +94,6 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
await ops_test.model.wait_for_idle(status="active", timeout=1500)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_storage_re_use(ops_test, continuous_writes):
"""Verifies that database units with attached storage correctly repurpose storage.
@@ -144,7 +141,6 @@ async def test_storage_re_use(ops_test, continuous_writes):
)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.parametrize("process", DB_PROCESSES)
@pytest.mark.parametrize("signal", ["SIGTERM", "SIGKILL"])
@@ -179,7 +175,6 @@ async def test_interruption_db_process(
await is_cluster_updated(ops_test, primary_name)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.parametrize("process", DB_PROCESSES)
async def test_freeze_db_process(
@@ -221,7 +216,6 @@ async def test_freeze_db_process(
await is_cluster_updated(ops_test, primary_name)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.parametrize("process", DB_PROCESSES)
@pytest.mark.parametrize("signal", ["SIGTERM", "SIGKILL"])
@@ -309,9 +303,8 @@ async def test_full_cluster_restart(
await check_writes(ops_test)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-@pytest.mark.unstable
+@pytest.mark.skip(reason="Unstable")
async def test_forceful_restart_without_data_and_transaction_logs(
ops_test: OpsTest,
continuous_writes,
@@ -386,7 +379,6 @@ async def test_forceful_restart_without_data_and_transaction_logs(
await is_cluster_updated(ops_test, primary_name)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_network_cut(ops_test: OpsTest, continuous_writes, primary_start_timeout):
"""Completely cut and restore network."""
@@ -475,7 +467,6 @@ async def test_network_cut(ops_test: OpsTest, continuous_writes, primary_start_t
await is_cluster_updated(ops_test, primary_name, use_ip_from_inside=True)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_network_cut_without_ip_change(
ops_test: OpsTest, continuous_writes, primary_start_timeout
diff --git a/tests/integration/ha_tests/test_smoke.py b/tests/integration/ha_tests/test_smoke.py
index ea872d45d0..3e718522ee 100644
--- a/tests/integration/ha_tests/test_smoke.py
+++ b/tests/integration/ha_tests/test_smoke.py
@@ -33,7 +33,6 @@
logger = logging.getLogger(__name__)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_app_force_removal(ops_test: OpsTest, charm: str):
"""Remove unit with force while storage is alive."""
@@ -93,7 +92,6 @@ async def test_app_force_removal(ops_test: OpsTest, charm: str):
assert await is_storage_exists(ops_test, storage_id_str)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_charm_garbage_ignorance(ops_test: OpsTest, charm: str):
"""Test charm deploy in dirty environment with garbage storage."""
@@ -133,7 +131,6 @@ async def test_charm_garbage_ignorance(ops_test: OpsTest, charm: str):
await ops_test.model.destroy_unit(primary_name)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.skipif(juju_major_version < 3, reason="Requires juju 3 or higher")
async def test_app_resources_conflicts_v3(ops_test: OpsTest, charm: str):
@@ -173,7 +170,6 @@ async def test_app_resources_conflicts_v3(ops_test: OpsTest, charm: str):
)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.skipif(juju_major_version != 2, reason="Requires juju 2")
async def test_app_resources_conflicts_v2(ops_test: OpsTest, charm: str):
diff --git a/tests/integration/ha_tests/test_upgrade.py b/tests/integration/ha_tests/test_upgrade.py
index 497c7ace9a..06b98bcceb 100644
--- a/tests/integration/ha_tests/test_upgrade.py
+++ b/tests/integration/ha_tests/test_upgrade.py
@@ -29,7 +29,6 @@
TIMEOUT = 600
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_deploy_latest(ops_test: OpsTest) -> None:
"""Simple test to ensure that the PostgreSQL and application charms get deployed."""
@@ -52,7 +51,6 @@ async def test_deploy_latest(ops_test: OpsTest) -> None:
assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
"""Test that the pre-upgrade-check action runs successfully."""
@@ -65,9 +63,8 @@ async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
await action.wait()
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes) -> None:
+async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes, charm) -> None:
# Start an application that continuously writes data to the database.
logger.info("starting continuous writes to the database")
await start_continuous_writes(ops_test, DATABASE_APP_NAME)
@@ -81,9 +78,6 @@ async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes) -> None:
application = ops_test.model.applications[DATABASE_APP_NAME]
- logger.info("Build charm locally")
- charm = await ops_test.build_charm(".")
-
logger.info("Refresh the charm")
await application.refresh(path=charm)
@@ -115,9 +109,8 @@ async def test_upgrade_from_edge(ops_test: OpsTest, continuous_writes) -> None:
)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_fail_and_rollback(ops_test, continuous_writes) -> None:
+async def test_fail_and_rollback(ops_test, charm, continuous_writes) -> None:
# Start an application that continuously writes data to the database.
logger.info("starting continuous writes to the database")
await start_continuous_writes(ops_test, DATABASE_APP_NAME)
@@ -134,10 +127,9 @@ async def test_fail_and_rollback(ops_test, continuous_writes) -> None:
action = await leader_unit.run_action("pre-upgrade-check")
await action.wait()
- local_charm = await ops_test.build_charm(".")
- filename = local_charm.split("/")[-1] if isinstance(local_charm, str) else local_charm.name
+ filename = Path(charm).name
fault_charm = Path("/tmp/", filename)
- shutil.copy(local_charm, fault_charm)
+ shutil.copy(charm, fault_charm)
logger.info("Inject dependency fault")
await inject_dependency_fault(ops_test, DATABASE_APP_NAME, fault_charm)
@@ -162,7 +154,7 @@ async def test_fail_and_rollback(ops_test, continuous_writes) -> None:
await action.wait()
logger.info("Re-refresh the charm")
- await application.refresh(path=local_charm)
+ await application.refresh(path=charm)
logger.info("Wait for upgrade to start")
await ops_test.model.block_until(
diff --git a/tests/integration/ha_tests/test_upgrade_from_stable.py b/tests/integration/ha_tests/test_upgrade_from_stable.py
index 977ec9c067..db0586a2ab 100644
--- a/tests/integration/ha_tests/test_upgrade_from_stable.py
+++ b/tests/integration/ha_tests/test_upgrade_from_stable.py
@@ -25,7 +25,6 @@
TIMEOUT = 900
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_deploy_stable(ops_test: OpsTest) -> None:
"""Simple test to ensure that the PostgreSQL and application charms get deployed."""
@@ -76,7 +75,6 @@ async def test_deploy_stable(ops_test: OpsTest) -> None:
assert len(ops_test.model.applications[DATABASE_APP_NAME].units) == 3
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
"""Test that the pre-upgrade-check action runs successfully."""
@@ -94,9 +92,8 @@ async def test_pre_upgrade_check(ops_test: OpsTest) -> None:
await action.wait()
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_upgrade_from_stable(ops_test: OpsTest):
+async def test_upgrade_from_stable(ops_test: OpsTest, charm):
"""Test updating from stable channel."""
# Start an application that continuously writes data to the database.
logger.info("starting continuous writes to the database")
@@ -112,9 +109,6 @@ async def test_upgrade_from_stable(ops_test: OpsTest):
application = ops_test.model.applications[DATABASE_APP_NAME]
actions = await application.get_actions()
- logger.info("Build charm locally")
- charm = await ops_test.build_charm(".")
-
logger.info("Refresh the charm")
await application.refresh(path=charm)
diff --git a/tests/integration/new_relations/test_new_relations.py b/tests/integration/new_relations/test_new_relations_1.py
similarity index 93%
rename from tests/integration/new_relations/test_new_relations.py
rename to tests/integration/new_relations/test_new_relations_1.py
index 70069e86e4..42571a80b9 100644
--- a/tests/integration/new_relations/test_new_relations.py
+++ b/tests/integration/new_relations/test_new_relations_1.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python3
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
import asyncio
@@ -13,7 +12,6 @@
from pytest_operator.plugin import OpsTest
from tenacity import Retrying, stop_after_attempt, wait_fixed
-from .. import markers
from ..helpers import (
CHARM_BASE,
assert_sync_standbys,
@@ -47,7 +45,6 @@
INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles"
-@pytest.mark.group("new_relations_tests")
@pytest.mark.abort_on_fail
async def test_deploy_charms(ops_test: OpsTest, charm):
"""Deploy both charms (application and database) to use in the tests."""
@@ -81,7 +78,6 @@ async def test_deploy_charms(ops_test: OpsTest, charm):
await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", timeout=3000)
-@pytest.mark.group("new_relations_tests")
async def test_no_read_only_endpoint_in_standalone_cluster(ops_test: OpsTest):
"""Test that there is no read-only endpoint in a standalone cluster."""
async with ops_test.fast_forward():
@@ -128,7 +124,6 @@ async def test_no_read_only_endpoint_in_standalone_cluster(ops_test: OpsTest):
)
-@pytest.mark.group("new_relations_tests")
async def test_read_only_endpoint_in_scaled_up_cluster(ops_test: OpsTest):
"""Test that there is read-only endpoint in a scaled up cluster."""
async with ops_test.fast_forward():
@@ -146,7 +141,6 @@ async def test_read_only_endpoint_in_scaled_up_cluster(ops_test: OpsTest):
)
-@pytest.mark.group("new_relations_tests")
async def test_database_relation_with_charm_libraries(ops_test: OpsTest):
"""Test basic functionality of database relation interface."""
# Get the connection string to connect to the database using the read/write endpoint.
@@ -194,7 +188,6 @@ async def test_database_relation_with_charm_libraries(ops_test: OpsTest):
cursor.execute("DROP TABLE test;")
-@pytest.mark.group("new_relations_tests")
@pytest.mark.abort_on_fail
async def test_filter_out_degraded_replicas(ops_test: OpsTest):
primary = await get_primary(ops_test, f"{DATABASE_APP_NAME}/0")
@@ -225,7 +218,6 @@ async def test_filter_out_degraded_replicas(ops_test: OpsTest):
)
-@pytest.mark.group("new_relations_tests")
async def test_user_with_extra_roles(ops_test: OpsTest):
"""Test superuser actions and the request for more permissions."""
# Get the connection string to connect to the database.
@@ -246,7 +238,6 @@ async def test_user_with_extra_roles(ops_test: OpsTest):
connection.close()
-@pytest.mark.group("new_relations_tests")
async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: OpsTest):
"""Test that two different application connect to the database with different credentials."""
# Set some variables to use in this test.
@@ -300,7 +291,6 @@ async def test_two_applications_doesnt_share_the_same_relation_data(ops_test: Op
psycopg2.connect(connection_string)
-@pytest.mark.group("new_relations_tests")
async def test_an_application_can_connect_to_multiple_database_clusters(ops_test: OpsTest):
"""Test that an application can connect to different clusters of the same database."""
# Relate the application with both database clusters
@@ -331,7 +321,6 @@ async def test_an_application_can_connect_to_multiple_database_clusters(ops_test
assert application_connection_string != another_application_connection_string
-@pytest.mark.group("new_relations_tests")
async def test_an_application_can_connect_to_multiple_aliased_database_clusters(ops_test: OpsTest):
"""Test that an application can connect to different clusters of the same database."""
# Relate the application with both database clusters
@@ -365,7 +354,6 @@ async def test_an_application_can_connect_to_multiple_aliased_database_clusters(
assert application_connection_string != another_application_connection_string
-@pytest.mark.group("new_relations_tests")
async def test_an_application_can_request_multiple_databases(ops_test: OpsTest):
"""Test that an application can request additional databases using the same interface."""
# Relate the charms using another relation and wait for them exchanging some connection data.
@@ -386,7 +374,6 @@ async def test_an_application_can_request_multiple_databases(ops_test: OpsTest):
assert first_database_connection_string != second_database_connection_string
-@pytest.mark.group("new_relations_tests")
@pytest.mark.abort_on_fail
async def test_relation_data_is_updated_correctly_when_scaling(ops_test: OpsTest):
"""Test that relation data, like connection data, is updated correctly when scaling."""
@@ -467,7 +454,6 @@ async def test_relation_data_is_updated_correctly_when_scaling(ops_test: OpsTest
psycopg2.connect(primary_connection_string)
-@pytest.mark.group("new_relations_tests")
async def test_relation_with_no_database_name(ops_test: OpsTest):
"""Test that a relation with no database name doesn't block the charm."""
async with ops_test.fast_forward():
@@ -484,7 +470,6 @@ async def test_relation_with_no_database_name(ops_test: OpsTest):
await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", raise_on_blocked=True)
-@pytest.mark.group("new_relations_tests")
async def test_admin_role(ops_test: OpsTest):
"""Test that the admin role gives access to all the databases."""
all_app_names = [DATA_INTEGRATOR_APP_NAME]
@@ -567,7 +552,6 @@ async def test_admin_role(ops_test: OpsTest):
connection.close()
-@pytest.mark.group("new_relations_tests")
async def test_invalid_extra_user_roles(ops_test: OpsTest):
async with ops_test.fast_forward():
# Remove the relation between the database and the first data integrator.
@@ -627,43 +611,3 @@ async def test_invalid_extra_user_roles(ops_test: OpsTest):
raise_on_blocked=False,
timeout=1000,
)
-
-
-@pytest.mark.group("nextcloud_blocked")
-@markers.amd64_only # nextcloud charm not available for arm64
-async def test_nextcloud_db_blocked(ops_test: OpsTest, charm: str) -> None:
- # Deploy Database Charm and Nextcloud
- await asyncio.gather(
- ops_test.model.deploy(
- charm,
- application_name=DATABASE_APP_NAME,
- num_units=1,
- base=CHARM_BASE,
- config={"profile": "testing"},
- ),
- ops_test.model.deploy(
- "nextcloud",
- channel="edge",
- application_name="nextcloud",
- num_units=1,
- base=CHARM_BASE,
- ),
- )
- await asyncio.gather(
- ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=2000),
- ops_test.model.wait_for_idle(
- apps=["nextcloud"],
- status="blocked",
- raise_on_blocked=False,
- timeout=2000,
- ),
- )
-
- await ops_test.model.relate("nextcloud:database", f"{DATABASE_APP_NAME}:database")
-
- await ops_test.model.wait_for_idle(
- apps=[DATABASE_APP_NAME, "nextcloud"],
- status="active",
- raise_on_blocked=False,
- timeout=1000,
- )
diff --git a/tests/integration/new_relations/test_new_relations_2.py b/tests/integration/new_relations/test_new_relations_2.py
new file mode 100644
index 0000000000..08827c168c
--- /dev/null
+++ b/tests/integration/new_relations/test_new_relations_2.py
@@ -0,0 +1,67 @@
+# Copyright 2022 Canonical Ltd.
+# See LICENSE file for licensing details.
+import asyncio
+import logging
+from pathlib import Path
+
+import yaml
+from pytest_operator.plugin import OpsTest
+
+from .. import markers
+from ..helpers import (
+ CHARM_BASE,
+)
+
+logger = logging.getLogger(__name__)
+
+APPLICATION_APP_NAME = "postgresql-test-app"
+DATABASE_APP_NAME = "database"
+ANOTHER_DATABASE_APP_NAME = "another-database"
+DATA_INTEGRATOR_APP_NAME = "data-integrator"
+APP_NAMES = [APPLICATION_APP_NAME, DATABASE_APP_NAME, ANOTHER_DATABASE_APP_NAME]
+DATABASE_APP_METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
+FIRST_DATABASE_RELATION_NAME = "database"
+SECOND_DATABASE_RELATION_NAME = "second-database"
+MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME = "multiple-database-clusters"
+ALIASED_MULTIPLE_DATABASE_CLUSTERS_RELATION_NAME = "aliased-multiple-database-clusters"
+NO_DATABASE_RELATION_NAME = "no-database"
+INVALID_EXTRA_USER_ROLE_BLOCKING_MESSAGE = "invalid role(s) for extra user roles"
+
+
+@markers.amd64_only # nextcloud charm not available for arm64
+async def test_nextcloud_db_blocked(ops_test: OpsTest, charm: str) -> None:
+ # Deploy Database Charm and Nextcloud
+ await asyncio.gather(
+ ops_test.model.deploy(
+ charm,
+ application_name=DATABASE_APP_NAME,
+ num_units=1,
+ base=CHARM_BASE,
+ config={"profile": "testing"},
+ ),
+ ops_test.model.deploy(
+ "nextcloud",
+ channel="edge",
+ application_name="nextcloud",
+ num_units=1,
+ base=CHARM_BASE,
+ ),
+ )
+ await asyncio.gather(
+ ops_test.model.wait_for_idle(apps=[DATABASE_APP_NAME], status="active", timeout=2000),
+ ops_test.model.wait_for_idle(
+ apps=["nextcloud"],
+ status="blocked",
+ raise_on_blocked=False,
+ timeout=2000,
+ ),
+ )
+
+ await ops_test.model.relate("nextcloud:database", f"{DATABASE_APP_NAME}:database")
+
+ await ops_test.model.wait_for_idle(
+ apps=[DATABASE_APP_NAME, "nextcloud"],
+ status="active",
+ raise_on_blocked=False,
+ timeout=1000,
+ )
diff --git a/tests/integration/new_relations/test_relations_coherence.py b/tests/integration/new_relations/test_relations_coherence.py
index 1f2a751922..fa44d33399 100644
--- a/tests/integration/new_relations/test_relations_coherence.py
+++ b/tests/integration/new_relations/test_relations_coherence.py
@@ -11,7 +11,7 @@
from ..helpers import CHARM_BASE, DATABASE_APP_NAME
from .helpers import build_connection_string
-from .test_new_relations import DATA_INTEGRATOR_APP_NAME
+from .test_new_relations_1 import DATA_INTEGRATOR_APP_NAME
logger = logging.getLogger(__name__)
@@ -20,7 +20,6 @@
FIRST_DATABASE_RELATION_NAME = "database"
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_relations(ops_test: OpsTest, charm):
"""Test that check relation data."""
diff --git a/tests/integration/relations/test_relations.py b/tests/integration/relations/test_relations.py
index b3a542decc..0037b5c140 100644
--- a/tests/integration/relations/test_relations.py
+++ b/tests/integration/relations/test_relations.py
@@ -10,7 +10,7 @@
from tenacity import Retrying, stop_after_delay, wait_fixed
from ..helpers import CHARM_BASE, METADATA
-from ..new_relations.test_new_relations import APPLICATION_APP_NAME, build_connection_string
+from ..new_relations.test_new_relations_1 import APPLICATION_APP_NAME, build_connection_string
from ..relations.helpers import get_legacy_db_connection_str
logger = logging.getLogger(__name__)
@@ -25,7 +25,6 @@
APP_NAMES = [APP_NAME, DATABASE_APP_NAME, DB_APP_NAME]
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_deploy_charms(ops_test: OpsTest, charm):
"""Deploy both charms (application and database) to use in the tests."""
@@ -63,7 +62,6 @@ async def test_deploy_charms(ops_test: OpsTest, charm):
await ops_test.model.wait_for_idle(apps=APP_NAMES, status="active", timeout=3000)
-@pytest.mark.group(1)
async def test_legacy_endpoint_with_multiple_related_endpoints(ops_test: OpsTest):
await ops_test.model.relate(f"{DB_APP_NAME}:{DB_RELATION}", f"{APP_NAME}:{DB_RELATION}")
await ops_test.model.relate(APP_NAME, f"{DATABASE_APP_NAME}:{FIRST_DATABASE_RELATION}")
@@ -104,7 +102,6 @@ async def test_legacy_endpoint_with_multiple_related_endpoints(ops_test: OpsTest
psycopg2.connect(legacy_interface_connect)
-@pytest.mark.group(1)
async def test_modern_endpoint_with_multiple_related_endpoints(ops_test: OpsTest):
await ops_test.model.relate(f"{DB_APP_NAME}:{DB_RELATION}", f"{APP_NAME}:{DB_RELATION}")
await ops_test.model.relate(APP_NAME, f"{DATABASE_APP_NAME}:{FIRST_DATABASE_RELATION}")
diff --git a/tests/integration/test_audit.py b/tests/integration/test_audit.py
index 257ef9cf70..4b4c3ae5e0 100644
--- a/tests/integration/test_audit.py
+++ b/tests/integration/test_audit.py
@@ -21,7 +21,6 @@
RELATION_ENDPOINT = "database"
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_audit_plugin(ops_test: OpsTest, charm) -> None:
"""Test the audit plugin."""
diff --git a/tests/integration/test_backups_aws.py b/tests/integration/test_backups_aws.py
new file mode 100644
index 0000000000..76343af0ac
--- /dev/null
+++ b/tests/integration/test_backups_aws.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python3
+# Copyright 2023 Canonical Ltd.
+# See LICENSE file for licensing details.
+import logging
+import os
+import uuid
+
+import boto3
+import pytest
+from pytest_operator.plugin import OpsTest
+from tenacity import Retrying, stop_after_attempt, wait_exponential
+
+from . import architecture
+from .helpers import (
+ DATABASE_APP_NAME,
+ backup_operations,
+ construct_endpoint,
+ db_connect,
+ get_password,
+ get_primary,
+ get_unit_address,
+ scale_application,
+ switchover,
+)
+from .juju_ import juju_major_version
+
+ANOTHER_CLUSTER_REPOSITORY_ERROR_MESSAGE = "the S3 repository has backups from another cluster"
+FAILED_TO_ACCESS_CREATE_BUCKET_ERROR_MESSAGE = (
+ "failed to access/create the bucket, check your S3 settings"
+)
+S3_INTEGRATOR_APP_NAME = "s3-integrator"
+if juju_major_version < 3:
+ tls_certificates_app_name = "tls-certificates-operator"
+ tls_channel = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable"
+ tls_config = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"}
+else:
+ tls_certificates_app_name = "self-signed-certificates"
+ tls_channel = "latest/edge" if architecture.architecture == "arm64" else "latest/stable"
+ tls_config = {"ca-common-name": "Test CA"}
+
+logger = logging.getLogger(__name__)
+
+AWS = "AWS"
+GCP = "GCP"
+
+
+@pytest.fixture(scope="module")
+async def cloud_configs():
+ # Define some configurations and credentials.
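+ # uuid.uuid1() makes the backup path unique per test run, keeping runs from colliding in the shared bucket.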
+ configs = {
+ AWS: {
+ "endpoint": "https://s3.amazonaws.com",
+ "bucket": "data-charms-testing",
+ "path": f"/postgresql-vm/{uuid.uuid1()}",
+ "region": "us-east-1",
+ },
+ GCP: {
+ "endpoint": "https://storage.googleapis.com",
+ "bucket": "data-charms-testing",
+ "path": f"/postgresql-vm/{uuid.uuid1()}",
+ "region": "",
+ },
+ }
+ credentials = {
+ AWS: {
+ "access-key": os.environ["AWS_ACCESS_KEY"],
+ "secret-key": os.environ["AWS_SECRET_KEY"],
+ },
+ GCP: {
+ "access-key": os.environ["GCP_ACCESS_KEY"],
+ "secret-key": os.environ["GCP_SECRET_KEY"],
+ },
+ }
+ yield configs, credentials
+ # Delete the previously created objects.
+ logger.info("deleting the previously created backups")
+ for cloud, config in configs.items():
+ session = boto3.session.Session(
+ aws_access_key_id=credentials[cloud]["access-key"],
+ aws_secret_access_key=credentials[cloud]["secret-key"],
+ region_name=config["region"],
+ )
+ s3 = session.resource(
+ "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"])
+ )
+ bucket = s3.Bucket(config["bucket"])
+ # GCS doesn't support batch delete operation, so delete the objects one by one.
+ for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")):
+ bucket_object.delete()
+
+
+@pytest.mark.abort_on_fail
+async def test_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
+ """Build and deploy two units of PostgreSQL in AWS, test backup and restore actions."""
+ config = cloud_configs[0][AWS]
+ credentials = cloud_configs[1][AWS]
+
+ await backup_operations(
+ ops_test,
+ S3_INTEGRATOR_APP_NAME,
+ tls_certificates_app_name,
+ tls_config,
+ tls_channel,
+ credentials,
+ AWS,
+ config,
+ charm,
+ )
+ database_app_name = f"{DATABASE_APP_NAME}-aws"
+
+ # Remove the relation to the TLS certificates operator.
+ await ops_test.model.applications[database_app_name].remove_relation(
+ f"{database_app_name}:certificates", f"{tls_certificates_app_name}:certificates"
+ )
+
+ new_unit_name = f"{database_app_name}/2"
+
+ # Scale up to be able to test primary and leader being different.
+ async with ops_test.fast_forward():
+ await scale_application(ops_test, database_app_name, 2)
+
+ # Ensure replication is working correctly.
+ address = get_unit_address(ops_test, new_unit_name)
+ password = await get_password(ops_test, new_unit_name)
+ patroni_password = await get_password(ops_test, new_unit_name, "patroni")
+ with db_connect(host=address, password=password) as connection, connection.cursor() as cursor:
+ cursor.execute(
+ "SELECT EXISTS (SELECT FROM information_schema.tables"
+ " WHERE table_schema = 'public' AND table_name = 'backup_table_1');"
+ )
+ assert cursor.fetchone()[0], (
+ f"replication isn't working correctly: table 'backup_table_1' doesn't exist in {new_unit_name}"
+ )
+ cursor.execute(
+ "SELECT EXISTS (SELECT FROM information_schema.tables"
+ " WHERE table_schema = 'public' AND table_name = 'backup_table_2');"
+ )
+ assert not cursor.fetchone()[0], (
+ f"replication isn't working correctly: table 'backup_table_2' exists in {new_unit_name}"
+ )
+ connection.close()
+
+ old_primary = await get_primary(ops_test, new_unit_name)
+ switchover(ops_test, old_primary, patroni_password, new_unit_name)
+
+ # Get the new primary unit.
+ primary = await get_primary(ops_test, new_unit_name)
+ # Check that the primary changed.
+ for attempt in Retrying(
+ stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=30)
+ ):
+ with attempt:
+ assert primary == new_unit_name
+
+ # Ensure stanza is working correctly.
+ logger.info("listing the available backups")
+ action = await ops_test.model.units.get(new_unit_name).run_action("list-backups")
+ await action.wait()
+ backups = action.results.get("backups")
+ assert backups, "backups not outputted"
+
+ await ops_test.model.wait_for_idle(status="active", timeout=1000)
+
+ # Remove the database app.
+ await ops_test.model.remove_application(database_app_name, block_until_done=True)
+
+ # Remove the TLS operator.
+ await ops_test.model.remove_application(tls_certificates_app_name, block_until_done=True)
diff --git a/tests/integration/test_backups_ceph.py b/tests/integration/test_backups_ceph.py
index 99a57fb1e1..bee798191a 100644
--- a/tests/integration/test_backups_ceph.py
+++ b/tests/integration/test_backups_ceph.py
@@ -190,7 +190,6 @@ def cloud_configs(microceph: ConnectionInformation):
}
-@pytest.mark.group("ceph")
@markers.amd64_only
async def test_backup_ceph(ops_test: OpsTest, cloud_configs, cloud_credentials, charm) -> None:
"""Build and deploy two units of PostgreSQL in microceph, test backup and restore actions."""
diff --git a/tests/integration/test_backups.py b/tests/integration/test_backups_gcp.py
similarity index 72%
rename from tests/integration/test_backups.py
rename to tests/integration/test_backups_gcp.py
index e087abd5b3..63cb3617bd 100644
--- a/tests/integration/test_backups.py
+++ b/tests/integration/test_backups_gcp.py
@@ -2,6 +2,7 @@
# Copyright 2023 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
+import os
import uuid
import boto3
@@ -17,10 +18,7 @@
construct_endpoint,
db_connect,
get_password,
- get_primary,
get_unit_address,
- scale_application,
- switchover,
wait_for_idle_on_blocked,
)
from .juju_ import juju_major_version
@@ -46,7 +44,7 @@
@pytest.fixture(scope="module")
-async def cloud_configs(github_secrets) -> None:
+async def cloud_configs():
# Define some configurations and credentials.
configs = {
AWS: {
@@ -64,12 +62,12 @@ async def cloud_configs(github_secrets) -> None:
}
credentials = {
AWS: {
- "access-key": github_secrets["AWS_ACCESS_KEY"],
- "secret-key": github_secrets["AWS_SECRET_KEY"],
+ "access-key": os.environ["AWS_ACCESS_KEY"],
+ "secret-key": os.environ["AWS_SECRET_KEY"],
},
GCP: {
- "access-key": github_secrets["GCP_ACCESS_KEY"],
- "secret-key": github_secrets["GCP_SECRET_KEY"],
+ "access-key": os.environ["GCP_ACCESS_KEY"],
+ "secret-key": os.environ["GCP_SECRET_KEY"],
},
}
yield configs, credentials
@@ -90,87 +88,6 @@ async def cloud_configs(github_secrets) -> None:
bucket_object.delete()
-@pytest.mark.group("AWS")
-@pytest.mark.abort_on_fail
-async def test_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
- """Build and deploy two units of PostgreSQL in AWS, test backup and restore actions."""
- config = cloud_configs[0][AWS]
- credentials = cloud_configs[1][AWS]
-
- await backup_operations(
- ops_test,
- S3_INTEGRATOR_APP_NAME,
- tls_certificates_app_name,
- tls_config,
- tls_channel,
- credentials,
- AWS,
- config,
- charm,
- )
- database_app_name = f"{DATABASE_APP_NAME}-aws"
-
- # Remove the relation to the TLS certificates operator.
- await ops_test.model.applications[database_app_name].remove_relation(
- f"{database_app_name}:certificates", f"{tls_certificates_app_name}:certificates"
- )
-
- new_unit_name = f"{database_app_name}/2"
-
- # Scale up to be able to test primary and leader being different.
- async with ops_test.fast_forward():
- await scale_application(ops_test, database_app_name, 2)
-
- # Ensure replication is working correctly.
- address = get_unit_address(ops_test, new_unit_name)
- password = await get_password(ops_test, new_unit_name)
- patroni_password = await get_password(ops_test, new_unit_name, "patroni")
- with db_connect(host=address, password=password) as connection, connection.cursor() as cursor:
- cursor.execute(
- "SELECT EXISTS (SELECT FROM information_schema.tables"
- " WHERE table_schema = 'public' AND table_name = 'backup_table_1');"
- )
- assert cursor.fetchone()[0], (
- f"replication isn't working correctly: table 'backup_table_1' doesn't exist in {new_unit_name}"
- )
- cursor.execute(
- "SELECT EXISTS (SELECT FROM information_schema.tables"
- " WHERE table_schema = 'public' AND table_name = 'backup_table_2');"
- )
- assert not cursor.fetchone()[0], (
- f"replication isn't working correctly: table 'backup_table_2' exists in {new_unit_name}"
- )
- connection.close()
-
- old_primary = await get_primary(ops_test, new_unit_name)
- switchover(ops_test, old_primary, patroni_password, new_unit_name)
-
- # Get the new primary unit.
- primary = await get_primary(ops_test, new_unit_name)
- # Check that the primary changed.
- for attempt in Retrying(
- stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=30)
- ):
- with attempt:
- assert primary == new_unit_name
-
- # Ensure stanza is working correctly.
- logger.info("listing the available backups")
- action = await ops_test.model.units.get(new_unit_name).run_action("list-backups")
- await action.wait()
- backups = action.results.get("backups")
- assert backups, "backups not outputted"
-
- await ops_test.model.wait_for_idle(status="active", timeout=1000)
-
- # Remove the database app.
- await ops_test.model.remove_application(database_app_name, block_until_done=True)
-
- # Remove the TLS operator.
- await ops_test.model.remove_application(tls_certificates_app_name, block_until_done=True)
-
-
-@pytest.mark.group("GCP")
@pytest.mark.abort_on_fail
async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
"""Build and deploy two units of PostgreSQL in GCP, test backup and restore actions."""
@@ -197,8 +114,7 @@ async def test_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], c
await ops_test.model.remove_application(tls_certificates_app_name, block_until_done=True)
-@pytest.mark.group("GCP")
-async def test_restore_on_new_cluster(ops_test: OpsTest, github_secrets, charm) -> None:
+async def test_restore_on_new_cluster(ops_test: OpsTest, charm) -> None:
"""Test that is possible to restore a backup to another PostgreSQL cluster."""
previous_database_app_name = f"{DATABASE_APP_NAME}-gcp"
database_app_name = f"new-{DATABASE_APP_NAME}"
@@ -295,7 +211,6 @@ async def test_restore_on_new_cluster(ops_test: OpsTest, github_secrets, charm)
connection.close()
-@pytest.mark.group("GCP")
async def test_invalid_config_and_recovery_after_fixing_it(
ops_test: OpsTest, cloud_configs: tuple[dict, dict]
) -> None:
diff --git a/tests/integration/test_backups_pitr.py b/tests/integration/test_backups_pitr_aws.py
similarity index 94%
rename from tests/integration/test_backups_pitr.py
rename to tests/integration/test_backups_pitr_aws.py
index 9a76ad3a5a..70da90c104 100644
--- a/tests/integration/test_backups_pitr.py
+++ b/tests/integration/test_backups_pitr_aws.py
@@ -2,6 +2,7 @@
# Copyright 2024 Canonical Ltd.
# See LICENSE file for licensing details.
import logging
+import os
import uuid
import boto3
@@ -39,7 +40,7 @@
@pytest.fixture(scope="module")
-async def cloud_configs(github_secrets) -> None:
+async def cloud_configs():
# Define some configurations and credentials.
configs = {
AWS: {
@@ -57,12 +58,12 @@ async def cloud_configs(github_secrets) -> None:
}
credentials = {
AWS: {
- "access-key": github_secrets["AWS_ACCESS_KEY"],
- "secret-key": github_secrets["AWS_SECRET_KEY"],
+ "access-key": os.environ["AWS_ACCESS_KEY"],
+ "secret-key": os.environ["AWS_SECRET_KEY"],
},
GCP: {
- "access-key": github_secrets["GCP_ACCESS_KEY"],
- "secret-key": github_secrets["GCP_SECRET_KEY"],
+ "access-key": os.environ["GCP_ACCESS_KEY"],
+ "secret-key": os.environ["GCP_SECRET_KEY"],
},
}
yield configs, credentials
@@ -374,7 +375,6 @@ async def pitr_backup_operations(
await ops_test.model.remove_application(tls_certificates_app_name, block_until_done=True)
-@pytest.mark.group("AWS")
@pytest.mark.abort_on_fail
async def test_pitr_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
"""Build, deploy two units of PostgreSQL and do backup in AWS. Then, write new data into DB, switch WAL file and test point-in-time-recovery restore action."""
@@ -394,26 +394,6 @@ async def test_pitr_backup_aws(ops_test: OpsTest, cloud_configs: tuple[dict, dic
)
-@pytest.mark.group("GCP")
-@pytest.mark.abort_on_fail
-async def test_pitr_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
- """Build, deploy two units of PostgreSQL and do backup in GCP. Then, write new data into DB, switch WAL file and test point-in-time-recovery restore action."""
- config = cloud_configs[0][GCP]
- credentials = cloud_configs[1][GCP]
-
- await pitr_backup_operations(
- ops_test,
- S3_INTEGRATOR_APP_NAME,
- TLS_CERTIFICATES_APP_NAME,
- TLS_CONFIG,
- TLS_CHANNEL,
- credentials,
- GCP,
- config,
- charm,
- )
-
-
def _create_table(host: str, password: str):
with db_connect(host=host, password=password) as connection:
connection.autocommit = True
diff --git a/tests/integration/test_backups_pitr_gcp.py b/tests/integration/test_backups_pitr_gcp.py
new file mode 100644
index 0000000000..e85ac25610
--- /dev/null
+++ b/tests/integration/test_backups_pitr_gcp.py
@@ -0,0 +1,440 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+import logging
+import os
+import uuid
+
+import boto3
+import pytest
+from pytest_operator.plugin import OpsTest
+from tenacity import Retrying, stop_after_attempt, wait_exponential
+
+from . import architecture
+from .helpers import (
+ CHARM_BASE,
+ DATABASE_APP_NAME,
+ construct_endpoint,
+ db_connect,
+ get_password,
+ get_primary,
+ get_unit_address,
+)
+from .juju_ import juju_major_version
+
+CANNOT_RESTORE_PITR = "cannot restore PITR, juju debug-log for details"
+S3_INTEGRATOR_APP_NAME = "s3-integrator"
+if juju_major_version < 3:
+ TLS_CERTIFICATES_APP_NAME = "tls-certificates-operator"
+ TLS_CHANNEL = "legacy/edge" if architecture.architecture == "arm64" else "legacy/stable"
+ TLS_CONFIG = {"generate-self-signed-certificates": "true", "ca-common-name": "Test CA"}
+else:
+ TLS_CERTIFICATES_APP_NAME = "self-signed-certificates"
+ TLS_CHANNEL = "latest/edge" if architecture.architecture == "arm64" else "latest/stable"
+ TLS_CONFIG = {"ca-common-name": "Test CA"}
+
+logger = logging.getLogger(__name__)
+
+AWS = "AWS"
+GCP = "GCP"
+
+
+@pytest.fixture(scope="module")
+async def cloud_configs():
+ # Define some configurations and credentials.
+ configs = {
+ AWS: {
+ "endpoint": "https://s3.amazonaws.com",
+ "bucket": "data-charms-testing",
+ "path": f"/postgresql-vm/{uuid.uuid1()}",
+ "region": "us-east-1",
+ },
+ GCP: {
+ "endpoint": "https://storage.googleapis.com",
+ "bucket": "data-charms-testing",
+ "path": f"/postgresql-vm/{uuid.uuid1()}",
+ "region": "",
+ },
+ }
+ credentials = {
+ AWS: {
+ "access-key": os.environ["AWS_ACCESS_KEY"],
+ "secret-key": os.environ["AWS_SECRET_KEY"],
+ },
+ GCP: {
+ "access-key": os.environ["GCP_ACCESS_KEY"],
+ "secret-key": os.environ["GCP_SECRET_KEY"],
+ },
+ }
+ yield configs, credentials
+ # Delete the previously created objects.
+ logger.info("deleting the previously created backups")
+ for cloud, config in configs.items():
+ session = boto3.session.Session(
+ aws_access_key_id=credentials[cloud]["access-key"],
+ aws_secret_access_key=credentials[cloud]["secret-key"],
+ region_name=config["region"],
+ )
+ s3 = session.resource(
+ "s3", endpoint_url=construct_endpoint(config["endpoint"], config["region"])
+ )
+ bucket = s3.Bucket(config["bucket"])
+ # GCS doesn't support batch delete operation, so delete the objects one by one.
+ for bucket_object in bucket.objects.filter(Prefix=config["path"].lstrip("/")):
+ bucket_object.delete()
+
+
+async def pitr_backup_operations(
+ ops_test: OpsTest,
+ s3_integrator_app_name: str,
+ tls_certificates_app_name: str,
+ tls_config,
+ tls_channel,
+ credentials,
+ cloud,
+ config,
+ charm,
+) -> None:
+ """Basic set of operations for PITR backup and timelines management testing.
+
+ The algorithm below is written in the format "(timeline): action_1 -> action_2".
+ 1: table -> backup_b1 -> test_data_td1 -> timestamp_ts1 -> test_data_td2 -> restore_ts1 => 2
+ 2: check_td1 -> check_not_td2 -> test_data_td3 -> restore_b1_latest => 3
+ 3: check_td1 -> check_td2 -> check_not_td3 -> test_data_td4 -> restore_t2_latest => 4
+ 4: check_td1 -> check_not_td2 -> check_td3 -> check_not_td4
+ """
+ # Set-up environment
+ database_app_name = f"{DATABASE_APP_NAME}-{cloud.lower()}"
+
+ logger.info("deploying the next charms: s3-integrator, self-signed-certificates, postgresql")
+ await ops_test.model.deploy(s3_integrator_app_name)
+ await ops_test.model.deploy(tls_certificates_app_name, config=tls_config, channel=tls_channel)
+ await ops_test.model.deploy(
+ charm,
+ application_name=database_app_name,
+ num_units=2,
+ base=CHARM_BASE,
+ config={"profile": "testing"},
+ )
+
+ logger.info(
+ "integrating self-signed-certificates with postgresql and waiting them to stabilize"
+ )
+ await ops_test.model.relate(database_app_name, tls_certificates_app_name)
+ async with ops_test.fast_forward(fast_interval="60s"):
+ await ops_test.model.wait_for_idle(
+ apps=[database_app_name, tls_certificates_app_name], status="active", timeout=1000
+ )
+
+ logger.info(f"configuring s3-integrator for {cloud}")
+ await ops_test.model.applications[s3_integrator_app_name].set_config(config)
+ action = await ops_test.model.units.get(f"{s3_integrator_app_name}/0").run_action(
+ "sync-s3-credentials",
+ **credentials,
+ )
+ await action.wait()
+
+ logger.info("integrating s3-integrator with postgresql and waiting model to stabilize")
+ await ops_test.model.relate(database_app_name, s3_integrator_app_name)
+ async with ops_test.fast_forward(fast_interval="60s"):
+ await ops_test.model.wait_for_idle(status="active", timeout=1000)
+
+ primary = await get_primary(ops_test, f"{database_app_name}/0")
+ for unit in ops_test.model.applications[database_app_name].units:
+ if unit.name != primary:
+ replica = unit.name
+ break
+ password = await get_password(ops_test, primary)
+ address = get_unit_address(ops_test, primary)
+
+ logger.info("1: creating table")
+ _create_table(address, password)
+
+ logger.info("1: creating backup b1")
+ action = await ops_test.model.units.get(replica).run_action("create-backup")
+ await action.wait()
+ backup_status = action.results.get("backup-status")
+ assert backup_status, "backup hasn't succeeded"
+ await ops_test.model.wait_for_idle(status="active", timeout=1000)
+ backup_b1 = await _get_most_recent_backup(ops_test, ops_test.model.units.get(replica))
+
+ logger.info("1: creating test data td1")
+ _insert_test_data("test_data_td1", address, password)
+
+ logger.info("1: get timestamp ts1")
+ with db_connect(host=address, password=password) as connection, connection.cursor() as cursor:
+ cursor.execute("SELECT current_timestamp;")
+ timestamp_ts1 = str(cursor.fetchone()[0])
+ connection.close()
+ # Invalid timestamp one year in the future, unreachable for PITR
+ unreachable_timestamp_ts1 = timestamp_ts1.replace(
+ timestamp_ts1[:4], str(int(timestamp_ts1[:4]) + 1), 1
+ )
+
+ logger.info("1: creating test data td2")
+ _insert_test_data("test_data_td2", address, password)
+
+ logger.info("1: switching wal")
+ _switch_wal(address, password)
+
+ logger.info("1: scaling down to do restore")
+ async with ops_test.fast_forward():
+ await ops_test.model.destroy_unit(replica)
+ await ops_test.model.wait_for_idle(status="active", timeout=1000)
+ for unit in ops_test.model.applications[database_app_name].units:
+ remaining_unit = unit
+ break
+
+ logger.info("1: restoring the backup b1 with bad restore-to-time parameter")
+ action = await remaining_unit.run_action(
+ "restore", **{"backup-id": backup_b1, "restore-to-time": "bad data"}
+ )
+ await action.wait()
+ assert action.status == "failed", (
+ "1: restore must fail with bad restore-to-time parameter, but that action succeeded"
+ )
+
+ logger.info("1: restoring the backup b1 with unreachable restore-to-time parameter")
+ action = await remaining_unit.run_action(
+ "restore", **{"backup-id": backup_b1, "restore-to-time": unreachable_timestamp_ts1}
+ )
+ await action.wait()
+ logger.info("1: waiting for the database charm to become blocked after restore")
+ async with ops_test.fast_forward():
+ await ops_test.model.block_until(
+ lambda: remaining_unit.workload_status_message == CANNOT_RESTORE_PITR,
+ timeout=1000,
+ )
+ logger.info(
+ "1: database charm become in blocked state after restore, as supposed to be with unreachable PITR parameter"
+ )
+
+ for attempt in Retrying(
+ stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+ ):
+ with attempt:
+ logger.info("1: restoring to the timestamp ts1")
+ action = await remaining_unit.run_action(
+ "restore", **{"restore-to-time": timestamp_ts1}
+ )
+ await action.wait()
+ restore_status = action.results.get("restore-status")
+ assert restore_status, "1: restore to the timestamp ts1 hasn't succeeded"
+ await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+ logger.info("2: successful restore")
+ primary = await get_primary(ops_test, remaining_unit.name)
+ address = get_unit_address(ops_test, primary)
+ timeline_t2 = await _get_most_recent_backup(ops_test, remaining_unit)
+ assert backup_b1 != timeline_t2, "2: timeline 2 missing or invalid in list-backups output"
+
+ logger.info("2: checking test data td1")
+ assert _check_test_data("test_data_td1", address, password), "2: test data td1 should exist"
+
+ logger.info("2: checking not test data td2")
+ assert not _check_test_data("test_data_td2", address, password), (
+ "2: test data td2 shouldn't exist"
+ )
+
+ logger.info("2: creating test data td3")
+ _insert_test_data("test_data_td3", address, password)
+
+ logger.info("2: get timestamp ts2")
+ with db_connect(host=address, password=password) as connection, connection.cursor() as cursor:
+ cursor.execute("SELECT current_timestamp;")
+ timestamp_ts2 = str(cursor.fetchone()[0])
+ connection.close()
+
+ logger.info("2: creating test data td4")
+ _insert_test_data("test_data_td4", address, password)
+
+ logger.info("2: switching wal")
+ _switch_wal(address, password)
+
+ for attempt in Retrying(
+ stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+ ):
+ with attempt:
+ logger.info("2: restoring the backup b1 to the latest")
+ action = await remaining_unit.run_action(
+ "restore", **{"backup-id": backup_b1, "restore-to-time": "latest"}
+ )
+ await action.wait()
+ restore_status = action.results.get("restore-status")
+ assert restore_status, "2: restore the backup b1 to the latest hasn't succeeded"
+ await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+ logger.info("3: successful restore")
+ primary = await get_primary(ops_test, remaining_unit.name)
+ address = get_unit_address(ops_test, primary)
+ timeline_t3 = await _get_most_recent_backup(ops_test, remaining_unit)
+ assert backup_b1 != timeline_t3 and timeline_t2 != timeline_t3, (
+ "3: timeline 3 do not exist in list-backups action or bad"
+ )
+
+ logger.info("3: checking test data td1")
+ assert _check_test_data("test_data_td1", address, password), "3: test data td1 should exist"
+
+ logger.info("3: checking test data td2")
+ assert _check_test_data("test_data_td2", address, password), "3: test data td2 should exist"
+
+ logger.info("3: checking not test data td3")
+ assert not _check_test_data("test_data_td3", address, password), (
+ "3: test data td3 shouldn't exist"
+ )
+
+ logger.info("3: checking not test data td4")
+ assert not _check_test_data("test_data_td4", address, password), (
+ "3: test data td4 shouldn't exist"
+ )
+
+ logger.info("3: switching wal")
+ _switch_wal(address, password)
+
+ for attempt in Retrying(
+ stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+ ):
+ with attempt:
+ logger.info("3: restoring the timeline 2 to the latest")
+ action = await remaining_unit.run_action(
+ "restore", **{"backup-id": timeline_t2, "restore-to-time": "latest"}
+ )
+ await action.wait()
+ restore_status = action.results.get("restore-status")
+ assert restore_status, "3: restore the timeline 2 to the latest hasn't succeeded"
+ await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+ logger.info("4: successful restore")
+ primary = await get_primary(ops_test, remaining_unit.name)
+ address = get_unit_address(ops_test, primary)
+ timeline_t4 = await _get_most_recent_backup(ops_test, remaining_unit)
+ assert (
+ backup_b1 != timeline_t4 and timeline_t2 != timeline_t4 and timeline_t3 != timeline_t4
+ ), "4: timeline 4 do not exist in list-backups action or bad"
+
+ logger.info("4: checking test data td1")
+ assert _check_test_data("test_data_td1", address, password), "4: test data td1 should exist"
+
+ logger.info("4: checking not test data td2")
+ assert not _check_test_data("test_data_td2", address, password), (
+ "4: test data td2 shouldn't exist"
+ )
+
+ logger.info("4: checking test data td3")
+ assert _check_test_data("test_data_td3", address, password), "4: test data td3 should exist"
+
+ logger.info("4: checking test data td4")
+ assert _check_test_data("test_data_td4", address, password), "4: test data td4 should exist"
+
+ logger.info("4: switching wal")
+ _switch_wal(address, password)
+
+ for attempt in Retrying(
+ stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=2, max=30)
+ ):
+ with attempt:
+ logger.info("4: restoring to the timestamp ts2")
+ action = await remaining_unit.run_action(
+ "restore", **{"restore-to-time": timestamp_ts2}
+ )
+ await action.wait()
+ restore_status = action.results.get("restore-status")
+ assert restore_status, "4: restore to the timestamp ts2 hasn't succeeded"
+ await ops_test.model.wait_for_idle(status="active", timeout=1000, idle_period=30)
+
+ logger.info("5: successful restore")
+ primary = await get_primary(ops_test, remaining_unit.name)
+ address = get_unit_address(ops_test, primary)
+ timeline_t5 = await _get_most_recent_backup(ops_test, remaining_unit)
+ assert (
+ backup_b1 != timeline_t5
+ and timeline_t2 != timeline_t5
+ and timeline_t3 != timeline_t5
+ and timeline_t4 != timeline_t5
+ ), "5: timeline 5 do not exist in list-backups action or bad"
+
+ logger.info("5: checking test data td1")
+ assert _check_test_data("test_data_td1", address, password), "5: test data td1 should exist"
+
+ logger.info("5: checking not test data td2")
+ assert not _check_test_data("test_data_td2", address, password), (
+ "5: test data td2 shouldn't exist"
+ )
+
+ logger.info("5: checking test data td3")
+ assert _check_test_data("test_data_td3", address, password), "5: test data td3 should exist"
+
+ logger.info("5: checking not test data td4")
+ assert not _check_test_data("test_data_td4", address, password), (
+ "5: test data td4 shouldn't exist"
+ )
+
+ # Remove the database app.
+ await ops_test.model.remove_application(database_app_name, block_until_done=True)
+ # Remove the TLS operator.
+ await ops_test.model.remove_application(tls_certificates_app_name, block_until_done=True)
+
+
+@pytest.mark.abort_on_fail
+async def test_pitr_backup_gcp(ops_test: OpsTest, cloud_configs: tuple[dict, dict], charm) -> None:
+ """Build, deploy two units of PostgreSQL and do backup in GCP. Then, write new data into DB, switch WAL file and test point-in-time-recovery restore action."""
+ config = cloud_configs[0][GCP]
+ credentials = cloud_configs[1][GCP]
+
+ await pitr_backup_operations(
+ ops_test,
+ S3_INTEGRATOR_APP_NAME,
+ TLS_CERTIFICATES_APP_NAME,
+ TLS_CONFIG,
+ TLS_CHANNEL,
+ credentials,
+ GCP,
+ config,
+ charm,
+ )
+
+
+def _create_table(host: str, password: str):
+ with db_connect(host=host, password=password) as connection:
+ connection.autocommit = True
+ connection.cursor().execute("CREATE TABLE IF NOT EXISTS backup_table (test_column TEXT);")
+ connection.close()
+
+
+def _insert_test_data(td: str, host: str, password: str):
+ with db_connect(host=host, password=password) as connection:
+ connection.autocommit = True
+ connection.cursor().execute(
+ "INSERT INTO backup_table (test_column) VALUES (%s);",
+ (td,),
+ )
+ connection.close()
+
+
+def _check_test_data(td: str, host: str, password: str) -> bool:
+ with db_connect(host=host, password=password) as connection, connection.cursor() as cursor:
+ cursor.execute(
+ "SELECT EXISTS (SELECT 1 FROM backup_table WHERE test_column = %s);",
+ (td,),
+ )
+ res = cursor.fetchone()[0]
+ connection.close()
+ return res
+
+
+def _switch_wal(host: str, password: str):
+ with db_connect(host=host, password=password) as connection:
+ connection.autocommit = True
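+ # pg_switch_wal() forces a switch to a new WAL segment so the current one can be archived.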
+ connection.cursor().execute("SELECT pg_switch_wal();")
+ connection.close()
+
+
+async def _get_most_recent_backup(ops_test: OpsTest, unit) -> str:
+ logger.info("listing the available backups")
+ action = await unit.run_action("list-backups")
+ await action.wait()
+ backups = action.results.get("backups")
+ assert backups, "backups not outputted"
+ await ops_test.model.wait_for_idle(status="active", timeout=1000)
+ most_recent_backup = backups.split("\n")[-1]
+ return most_recent_backup.split()[0]
diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py
index 53ac76e4b2..743bbdb242 100644
--- a/tests/integration/test_charm.py
+++ b/tests/integration/test_charm.py
@@ -37,7 +37,6 @@
UNIT_IDS = [0, 1, 2]
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.skip_if_deployed
async def test_deploy(ops_test: OpsTest, charm: str):
@@ -61,7 +60,6 @@ async def test_deploy(ops_test: OpsTest, charm: str):
assert ops_test.model.applications[DATABASE_APP_NAME].units[0].workload_status == "active"
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.parametrize("unit_id", UNIT_IDS)
async def test_database_is_up(ops_test: OpsTest, unit_id: int):
@@ -72,7 +70,6 @@ async def test_database_is_up(ops_test: OpsTest, unit_id: int):
assert result.status_code == 200
-@pytest.mark.group(1)
@pytest.mark.parametrize("unit_id", UNIT_IDS)
async def test_exporter_is_up(ops_test: OpsTest, unit_id: int):
# Query Patroni REST API and check the status that indicates
@@ -85,7 +82,6 @@ async def test_exporter_is_up(ops_test: OpsTest, unit_id: int):
)
-@pytest.mark.group(1)
@pytest.mark.parametrize("unit_id", UNIT_IDS)
async def test_settings_are_correct(ops_test: OpsTest, unit_id: int):
# Connect to the PostgreSQL instance.
@@ -171,7 +167,6 @@ async def test_settings_are_correct(ops_test: OpsTest, unit_id: int):
assert unit.data["port-ranges"][0]["protocol"] == "tcp"
-@pytest.mark.group(1)
async def test_postgresql_locales(ops_test: OpsTest) -> None:
raw_locales = await run_command_on_unit(
ops_test,
@@ -188,7 +183,6 @@ async def test_postgresql_locales(ops_test: OpsTest) -> None:
assert locales == SNAP_LOCALES
-@pytest.mark.group(1)
async def test_postgresql_parameters_change(ops_test: OpsTest) -> None:
"""Test that's possible to change PostgreSQL parameters."""
await ops_test.model.applications[DATABASE_APP_NAME].set_config({
@@ -236,7 +230,6 @@ async def test_postgresql_parameters_change(ops_test: OpsTest) -> None:
connection.close()
-@pytest.mark.group(1)
async def test_scale_down_and_up(ops_test: OpsTest):
"""Test data is replicated to new units after a scale up."""
# Ensure the initial number of units in the application.
@@ -324,7 +317,6 @@ async def test_scale_down_and_up(ops_test: OpsTest):
await scale_application(ops_test, DATABASE_APP_NAME, initial_scale)
-@pytest.mark.group(1)
async def test_switchover_sync_standby(ops_test: OpsTest):
original_roles = await get_cluster_roles(
ops_test, ops_test.model.applications[DATABASE_APP_NAME].units[0].name
@@ -342,7 +334,6 @@ async def test_switchover_sync_standby(ops_test: OpsTest):
assert new_roles["primaries"][0] == original_roles["sync_standbys"][0]
-@pytest.mark.group(1)
async def test_persist_data_through_primary_deletion(ops_test: OpsTest):
"""Test data persists through a primary deletion."""
# Set a composite application name in order to test in more than one series at the same time.
diff --git a/tests/integration/test_config.py b/tests/integration/test_config.py
index 622264c6c4..304c8e9efc 100644
--- a/tests/integration/test_config.py
+++ b/tests/integration/test_config.py
@@ -15,13 +15,11 @@
logger = logging.getLogger(__name__)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_config_parameters(ops_test: OpsTest) -> None:
+async def test_config_parameters(ops_test: OpsTest, charm) -> None:
"""Build and deploy one unit of PostgreSQL and then test config with wrong parameters."""
# Build and deploy the PostgreSQL charm.
async with ops_test.fast_forward():
- charm = await ops_test.build_charm(".")
await ops_test.model.deploy(
charm,
num_units=1,
diff --git a/tests/integration/test_db.py b/tests/integration/test_db.py
index 88f87c1536..5d6195a8e2 100644
--- a/tests/integration/test_db.py
+++ b/tests/integration/test_db.py
@@ -41,7 +41,6 @@
)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_mailman3_core_db(ops_test: OpsTest, charm: str) -> None:
"""Deploy Mailman3 Core to test the 'db' relation."""
@@ -109,7 +108,6 @@ async def test_mailman3_core_db(ops_test: OpsTest, charm: str) -> None:
assert domain_name not in [domain.mail_host for domain in client.domains]
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_relation_data_is_updated_correctly_when_scaling(ops_test: OpsTest):
"""Test that relation data, like connection data, is updated correctly when scaling."""
@@ -192,7 +190,6 @@ async def test_relation_data_is_updated_correctly_when_scaling(ops_test: OpsTest
psycopg2.connect(primary_connection_string)
-@pytest.mark.group(1)
async def test_roles_blocking(ops_test: OpsTest, charm: str) -> None:
await ops_test.model.deploy(
APPLICATION_NAME,
@@ -250,7 +247,6 @@ async def test_roles_blocking(ops_test: OpsTest, charm: str) -> None:
)
-@pytest.mark.group(1)
async def test_extensions_blocking(ops_test: OpsTest, charm: str) -> None:
await asyncio.gather(
ops_test.model.applications[APPLICATION_NAME].set_config({"legacy_roles": "False"}),
@@ -292,8 +288,7 @@ async def test_extensions_blocking(ops_test: OpsTest, charm: str) -> None:
@markers.juju2
-@pytest.mark.group(1)
-@pytest.mark.unstable
+@pytest.mark.skip(reason="Unstable")
@markers.amd64_only # canonical-livepatch-server charm (in bundle) not available for arm64
async def test_canonical_livepatch_onprem_bundle_db(ops_test: OpsTest) -> None:
# Deploy and test the Livepatch onprem bundle (using this PostgreSQL charm
diff --git a/tests/integration/test_db_admin.py b/tests/integration/test_db_admin.py
index b95d38d70d..763b7a93cb 100644
--- a/tests/integration/test_db_admin.py
+++ b/tests/integration/test_db_admin.py
@@ -37,7 +37,6 @@
RELATION_NAME = "db-admin"
-@pytest.mark.group(1)
async def test_landscape_scalable_bundle_db(ops_test: OpsTest, charm: str) -> None:
"""Deploy Landscape Scalable Bundle to test the 'db-admin' relation."""
await ops_test.model.deploy(
diff --git a/tests/integration/test_password_rotation.py b/tests/integration/test_password_rotation.py
index 0cf2f6c26c..563626b229 100644
--- a/tests/integration/test_password_rotation.py
+++ b/tests/integration/test_password_rotation.py
@@ -27,12 +27,10 @@
APP_NAME = METADATA["name"]
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.skip_if_deployed
-async def test_deploy_active(ops_test: OpsTest):
+async def test_deploy_active(ops_test: OpsTest, charm):
"""Build the charm and deploy it."""
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
await ops_test.model.deploy(
charm,
@@ -44,7 +42,6 @@ async def test_deploy_active(ops_test: OpsTest):
await ops_test.model.wait_for_idle(apps=[APP_NAME], status="active", timeout=1500)
-@pytest.mark.group(1)
async def test_password_rotation(ops_test: OpsTest):
"""Test password rotation action."""
# Get the initial passwords set for the system users.
@@ -120,7 +117,6 @@ async def test_password_rotation(ops_test: OpsTest):
assert check_patroni(ops_test, unit.name, restart_time)
-@pytest.mark.group(1)
@markers.juju_secrets
async def test_password_from_secret_same_as_cli(ops_test: OpsTest):
"""Checking if password is same as returned by CLI.
@@ -147,7 +143,6 @@ async def test_password_from_secret_same_as_cli(ops_test: OpsTest):
assert data[secret_id]["content"]["Data"]["replication-password"] == password
-@pytest.mark.group(1)
async def test_empty_password(ops_test: OpsTest) -> None:
"""Test that the password can't be set to an empty string."""
leader_unit = await get_leader_unit(ops_test, APP_NAME)
@@ -160,7 +155,6 @@ async def test_empty_password(ops_test: OpsTest) -> None:
assert password == "None"
-@pytest.mark.group(1)
async def test_db_connection_with_empty_password(ops_test: OpsTest):
"""Test that user can't connect with empty password."""
primary = await get_primary(ops_test, f"{APP_NAME}/0")
@@ -169,7 +163,6 @@ async def test_db_connection_with_empty_password(ops_test: OpsTest):
connection.close()
-@pytest.mark.group(1)
async def test_no_password_change_on_invalid_password(ops_test: OpsTest) -> None:
"""Test that in general, there is no change when password validation fails."""
leader_unit = await get_leader_unit(ops_test, APP_NAME)
@@ -182,7 +175,6 @@ async def test_no_password_change_on_invalid_password(ops_test: OpsTest) -> None
assert password1 == password2
-@pytest.mark.group(1)
async def test_no_password_exposed_on_logs(ops_test: OpsTest) -> None:
"""Test that passwords don't get exposed on postgresql logs."""
for unit in ops_test.model.applications[APP_NAME].units:
diff --git a/tests/integration/test_plugins.py b/tests/integration/test_plugins.py
index 7b3d5d3ce1..a24c963000 100644
--- a/tests/integration/test_plugins.py
+++ b/tests/integration/test_plugins.py
@@ -88,13 +88,11 @@
TIMESCALEDB_EXTENSION_STATEMENT = "CREATE TABLE test_timescaledb (time TIMESTAMPTZ NOT NULL); SELECT create_hypertable('test_timescaledb', 'time');"
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_plugins(ops_test: OpsTest) -> None:
+async def test_plugins(ops_test: OpsTest, charm) -> None:
"""Build and deploy one unit of PostgreSQL and then test the available plugins."""
# Build and deploy the PostgreSQL charm.
async with ops_test.fast_forward():
- charm = await ops_test.build_charm(".")
await ops_test.model.deploy(
charm,
num_units=2,
@@ -210,7 +208,6 @@ def enable_disable_config(enabled: False):
connection.close()
-@pytest.mark.group(1)
async def test_plugin_objects(ops_test: OpsTest) -> None:
"""Checks if charm gets blocked when trying to disable a plugin in use."""
primary = await get_primary(ops_test, f"{DATABASE_APP_NAME}/0")
diff --git a/tests/integration/test_subordinates.py b/tests/integration/test_subordinates.py
index be9be926cc..c03288ae36 100644
--- a/tests/integration/test_subordinates.py
+++ b/tests/integration/test_subordinates.py
@@ -3,6 +3,7 @@
# See LICENSE file for licensing details.
import logging
+import os
from asyncio import gather
import pytest
@@ -20,9 +21,8 @@
logger = logging.getLogger(__name__)
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
-async def test_deploy(ops_test: OpsTest, charm: str, github_secrets):
+async def test_deploy(ops_test: OpsTest, charm: str):
await gather(
ops_test.model.deploy(
charm,
@@ -32,7 +32,7 @@ async def test_deploy(ops_test: OpsTest, charm: str, github_secrets):
),
ops_test.model.deploy(
UBUNTU_PRO_APP_NAME,
- config={"token": github_secrets["UBUNTU_PRO_TOKEN"]},
+ config={"token": os.environ["UBUNTU_PRO_TOKEN"]},
channel="latest/edge",
num_units=0,
base=CHARM_BASE,
@@ -40,8 +40,8 @@ async def test_deploy(ops_test: OpsTest, charm: str, github_secrets):
ops_test.model.deploy(
LS_CLIENT,
config={
- "account-name": github_secrets["LANDSCAPE_ACCOUNT_NAME"],
- "registration-key": github_secrets["LANDSCAPE_REGISTRATION_KEY"],
+ "account-name": os.environ["LANDSCAPE_ACCOUNT_NAME"],
+ "registration-key": os.environ["LANDSCAPE_REGISTRATION_KEY"],
"ppa": "ppa:landscape/self-hosted-beta",
},
channel="latest/edge",
@@ -60,8 +60,7 @@ async def test_deploy(ops_test: OpsTest, charm: str, github_secrets):
)
-@pytest.mark.group(1)
-async def test_scale_up(ops_test: OpsTest, github_secrets):
+async def test_scale_up(ops_test: OpsTest):
await scale_application(ops_test, DATABASE_APP_NAME, 4)
await ops_test.model.wait_for_idle(
@@ -69,8 +68,7 @@ async def test_scale_up(ops_test: OpsTest, github_secrets):
)
-@pytest.mark.group(1)
-async def test_scale_down(ops_test: OpsTest, github_secrets):
+async def test_scale_down(ops_test: OpsTest):
await scale_application(ops_test, DATABASE_APP_NAME, 3)
await ops_test.model.wait_for_idle(
diff --git a/tests/integration/test_tls.py b/tests/integration/test_tls.py
index 7408a8352f..31053c4677 100644
--- a/tests/integration/test_tls.py
+++ b/tests/integration/test_tls.py
@@ -41,12 +41,10 @@
tls_config = {"ca-common-name": "Test CA"}
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
@pytest.mark.skip_if_deployed
-async def test_deploy_active(ops_test: OpsTest):
+async def test_deploy_active(ops_test: OpsTest, charm):
"""Build the charm and deploy it."""
- charm = await ops_test.build_charm(".")
async with ops_test.fast_forward():
await ops_test.model.deploy(
charm,
@@ -59,7 +57,6 @@ async def test_deploy_active(ops_test: OpsTest):
# bundles don't wait between deploying charms.
-@pytest.mark.group(1)
@pytest.mark.abort_on_fail
async def test_tls_enabled(ops_test: OpsTest) -> None:
"""Test that TLS is enabled when relating to the TLS Certificates Operator."""
diff --git a/tests/spread/test_async_replication.py/task.yaml b/tests/spread/test_async_replication.py/task.yaml
new file mode 100644
index 0000000000..4fbf3b6b36
--- /dev/null
+++ b/tests/spread/test_async_replication.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_async_replication.py
+environment:
+ TEST_MODULE: ha_tests/test_async_replication.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+variants:
+ - -juju29
diff --git a/tests/spread/test_audit.py/task.yaml b/tests/spread/test_audit.py/task.yaml
new file mode 100644
index 0000000000..9cbc84e43d
--- /dev/null
+++ b/tests/spread/test_audit.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_audit.py
+environment:
+ TEST_MODULE: test_audit.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_backups_aws.py/task.yaml b/tests/spread/test_backups_aws.py/task.yaml
new file mode 100644
index 0000000000..c7eb541232
--- /dev/null
+++ b/tests/spread/test_backups_aws.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_backups_aws.py
+environment:
+ TEST_MODULE: test_backups_aws.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+backends:
+ - -lxd-vm # Requires CI secrets
diff --git a/tests/spread/test_backups_ceph.py/task.yaml b/tests/spread/test_backups_ceph.py/task.yaml
new file mode 100644
index 0000000000..8f6c8a387d
--- /dev/null
+++ b/tests/spread/test_backups_ceph.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_backups_ceph.py
+environment:
+ TEST_MODULE: test_backups_ceph.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+systems:
+ - -ubuntu-24.04-arm
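+ # test_backup_ceph is marked amd64_only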
diff --git a/tests/spread/test_backups_gcp.py/task.yaml b/tests/spread/test_backups_gcp.py/task.yaml
new file mode 100644
index 0000000000..c0dc3ac976
--- /dev/null
+++ b/tests/spread/test_backups_gcp.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_backups_gcp.py
+environment:
+ TEST_MODULE: test_backups_gcp.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+backends:
+ - -lxd-vm # Requires CI secrets
diff --git a/tests/spread/test_backups_pitr_aws.py/task.yaml b/tests/spread/test_backups_pitr_aws.py/task.yaml
new file mode 100644
index 0000000000..4ac59fbf85
--- /dev/null
+++ b/tests/spread/test_backups_pitr_aws.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_backups_pitr_aws.py
+environment:
+ TEST_MODULE: test_backups_pitr_aws.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+backends:
+ - -lxd-vm # Requires CI secrets
diff --git a/tests/spread/test_backups_pitr_gcp.py/task.yaml b/tests/spread/test_backups_pitr_gcp.py/task.yaml
new file mode 100644
index 0000000000..a6b31a59a6
--- /dev/null
+++ b/tests/spread/test_backups_pitr_gcp.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_backups_pitr_gcp.py
+environment:
+ TEST_MODULE: test_backups_pitr_gcp.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+backends:
+ - -lxd-vm # Requires CI secrets
diff --git a/tests/spread/test_charm.py/task.yaml b/tests/spread/test_charm.py/task.yaml
new file mode 100644
index 0000000000..96450bdc32
--- /dev/null
+++ b/tests/spread/test_charm.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_charm.py
+environment:
+ TEST_MODULE: test_charm.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_config.py/task.yaml b/tests/spread/test_config.py/task.yaml
new file mode 100644
index 0000000000..f330f89b38
--- /dev/null
+++ b/tests/spread/test_config.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_config.py
+environment:
+ TEST_MODULE: test_config.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_db.py/task.yaml b/tests/spread/test_db.py/task.yaml
new file mode 100644
index 0000000000..a560e14b8a
--- /dev/null
+++ b/tests/spread/test_db.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_db.py
+environment:
+ TEST_MODULE: test_db.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_db_admin.py/task.yaml b/tests/spread/test_db_admin.py/task.yaml
new file mode 100644
index 0000000000..b5f127b98c
--- /dev/null
+++ b/tests/spread/test_db_admin.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_db_admin.py
+environment:
+ TEST_MODULE: test_db_admin.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_new_relations_1.py/task.yaml b/tests/spread/test_new_relations_1.py/task.yaml
new file mode 100644
index 0000000000..0c64fe771f
--- /dev/null
+++ b/tests/spread/test_new_relations_1.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_new_relations_1.py
+environment:
+ TEST_MODULE: new_relations/test_new_relations_1.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_new_relations_2.py/task.yaml b/tests/spread/test_new_relations_2.py/task.yaml
new file mode 100644
index 0000000000..0b7af326a4
--- /dev/null
+++ b/tests/spread/test_new_relations_2.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_new_relations_2.py
+environment:
+ TEST_MODULE: new_relations/test_new_relations_2.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+systems:
+ - -ubuntu-24.04-arm
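+ # nextcloud charm not available for arm64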
diff --git a/tests/spread/test_password_rotation.py/task.yaml b/tests/spread/test_password_rotation.py/task.yaml
new file mode 100644
index 0000000000..439559b4e6
--- /dev/null
+++ b/tests/spread/test_password_rotation.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_password_rotation.py
+environment:
+ TEST_MODULE: test_password_rotation.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_plugins.py/task.yaml b/tests/spread/test_plugins.py/task.yaml
new file mode 100644
index 0000000000..e9dce8e28f
--- /dev/null
+++ b/tests/spread/test_plugins.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_plugins.py
+environment:
+ TEST_MODULE: test_plugins.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_relations.py/task.yaml b/tests/spread/test_relations.py/task.yaml
new file mode 100644
index 0000000000..a1c60423eb
--- /dev/null
+++ b/tests/spread/test_relations.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_relations.py
+environment:
+ TEST_MODULE: relations/test_relations.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_relations_coherence.py/task.yaml b/tests/spread/test_relations_coherence.py/task.yaml
new file mode 100644
index 0000000000..bff0e492b3
--- /dev/null
+++ b/tests/spread/test_relations_coherence.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_relations_coherence.py
+environment:
+ TEST_MODULE: new_relations/test_relations_coherence.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_replication.py/task.yaml b/tests/spread/test_replication.py/task.yaml
new file mode 100644
index 0000000000..237cc3981b
--- /dev/null
+++ b/tests/spread/test_replication.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_replication.py
+environment:
+ TEST_MODULE: ha_tests/test_replication.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_restore_cluster.py/task.yaml b/tests/spread/test_restore_cluster.py/task.yaml
new file mode 100644
index 0000000000..bce2ec14d4
--- /dev/null
+++ b/tests/spread/test_restore_cluster.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_restore_cluster.py
+environment:
+ TEST_MODULE: ha_tests/test_restore_cluster.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_scaling.py/task.yaml b/tests/spread/test_scaling.py/task.yaml
new file mode 100644
index 0000000000..32358243db
--- /dev/null
+++ b/tests/spread/test_scaling.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_scaling.py
+environment:
+ TEST_MODULE: ha_tests/test_scaling.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+variants:
+ - -juju29
diff --git a/tests/spread/test_scaling_three_units.py/task.yaml b/tests/spread/test_scaling_three_units.py/task.yaml
new file mode 100644
index 0000000000..ae8dcc1006
--- /dev/null
+++ b/tests/spread/test_scaling_three_units.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_scaling_three_units.py
+environment:
+ TEST_MODULE: ha_tests/test_scaling_three_units.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+variants:
+ - -juju29
diff --git a/tests/spread/test_self_healing.py/task.yaml b/tests/spread/test_self_healing.py/task.yaml
new file mode 100644
index 0000000000..d8fca3acea
--- /dev/null
+++ b/tests/spread/test_self_healing.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_self_healing.py
+environment:
+ TEST_MODULE: ha_tests/test_self_healing.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_smoke.py/task.yaml b/tests/spread/test_smoke.py/task.yaml
new file mode 100644
index 0000000000..d2fe9793d1
--- /dev/null
+++ b/tests/spread/test_smoke.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_smoke.py
+environment:
+ TEST_MODULE: ha_tests/test_smoke.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_subordinates.py/task.yaml b/tests/spread/test_subordinates.py/task.yaml
new file mode 100644
index 0000000000..a7477d7bab
--- /dev/null
+++ b/tests/spread/test_subordinates.py/task.yaml
@@ -0,0 +1,9 @@
+summary: test_subordinates.py
+environment:
+ TEST_MODULE: test_subordinates.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
+backends:
+ - -lxd-vm # Requires CI secrets
diff --git a/tests/spread/test_tls.py/task.yaml b/tests/spread/test_tls.py/task.yaml
new file mode 100644
index 0000000000..a605744913
--- /dev/null
+++ b/tests/spread/test_tls.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_tls.py
+environment:
+ TEST_MODULE: test_tls.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_upgrade.py/task.yaml b/tests/spread/test_upgrade.py/task.yaml
new file mode 100644
index 0000000000..b3be366921
--- /dev/null
+++ b/tests/spread/test_upgrade.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_upgrade.py
+environment:
+ TEST_MODULE: ha_tests/test_upgrade.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tests/spread/test_upgrade_from_stable.py/task.yaml b/tests/spread/test_upgrade_from_stable.py/task.yaml
new file mode 100644
index 0000000000..047617ab39
--- /dev/null
+++ b/tests/spread/test_upgrade_from_stable.py/task.yaml
@@ -0,0 +1,7 @@
+summary: test_upgrade_from_stable.py
+environment:
+ TEST_MODULE: ha_tests/test_upgrade_from_stable.py
+execute: |
+ tox run -e integration -- "tests/integration/$TEST_MODULE" --model testing --alluredir="$SPREAD_TASK/allure-results"
+artifacts:
+ - allure-results
diff --git a/tox.ini b/tox.ini
index 508d1a645f..0f7b4d4bd4 100644
--- a/tox.ini
+++ b/tox.ini
@@ -57,8 +57,13 @@ commands =
description = Run integration tests
pass_env =
CI
- GITHUB_OUTPUT
- SECRETS_FROM_GITHUB
+ AWS_ACCESS_KEY
+ AWS_SECRET_KEY
+ GCP_ACCESS_KEY
+ GCP_SECRET_KEY
+ UBUNTU_PRO_TOKEN
+ LANDSCAPE_ACCOUNT_NAME
+ LANDSCAPE_REGISTRATION_KEY
commands_pre =
poetry install --only integration --no-root
commands =