From 4eb2e672eeee051c69a0710d0d8fa6f0f0fd4ff2 Mon Sep 17 00:00:00 2001 From: ananthu-kuttattu Date: Mon, 19 Sep 2022 12:28:47 +0530 Subject: [PATCH 1/2] Ansible modules for PowerFlex release version 1.4.0 --- ChangeLog.md | 6 + README.md | 140 +-- docs/ADOPTERS.md | 11 + docs/BRANCHING.md | 32 + docs/CODE_OF_CONDUCT.md | 137 ++ docs/COMMITTER_GUIDE.md | 49 + docs/CONTRIBUTING.md | 173 +++ docs/INSTALLATION.md | 106 ++ docs/ISSUE_TRIAGE.md | 308 +++++ docs/MAINTAINERS.md | 17 + docs/MAINTAINER_GUIDE.md | 38 + docs/Product Guide.md | 1115 ++++++++++------- docs/Release Notes.md | 29 +- docs/SECURITY.md | 22 + docs/SUPPORT.md | 12 + galaxy.yml | 9 +- meta/execution-environment.yml | 4 +- plugins/module_utils/storage/dell/utils.py | 6 +- plugins/modules/info.py | 580 ++++++++- plugins/modules/storagepool.py | 281 +++++ plugins/modules/volume.py | 69 +- .../plugins/module_utils/mock_info_api.py | 229 ++++ .../module_utils/mock_storagepool_api.py | 467 +++++++ .../plugins/module_utils/mock_volume_api.py | 548 ++++++++ tests/unit/plugins/modules/test_info.py | 109 ++ .../unit/plugins/modules/test_storagepool.py | 73 ++ tests/unit/plugins/modules/test_volume.py | 82 ++ 27 files changed, 4070 insertions(+), 582 deletions(-) create mode 100644 docs/ADOPTERS.md create mode 100644 docs/BRANCHING.md create mode 100644 docs/CODE_OF_CONDUCT.md create mode 100644 docs/COMMITTER_GUIDE.md create mode 100644 docs/CONTRIBUTING.md create mode 100644 docs/INSTALLATION.md create mode 100644 docs/ISSUE_TRIAGE.md create mode 100644 docs/MAINTAINERS.md create mode 100644 docs/MAINTAINER_GUIDE.md create mode 100644 docs/SECURITY.md create mode 100644 docs/SUPPORT.md create mode 100644 tests/unit/plugins/module_utils/mock_info_api.py create mode 100644 tests/unit/plugins/module_utils/mock_storagepool_api.py create mode 100644 tests/unit/plugins/module_utils/mock_volume_api.py create mode 100644 tests/unit/plugins/modules/test_info.py create mode 100644 
tests/unit/plugins/modules/test_storagepool.py create mode 100644 tests/unit/plugins/modules/test_volume.py diff --git a/ChangeLog.md b/ChangeLog.md index 4e39d2c..a18a0d8 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,5 +1,11 @@ # ansible-powerflex Change Log +## Version 1.4.0 - released on 27/09/22 +- Info module is enhanced to support the listing volumes and storage pools with statistics data​. +- Storage pool module is enhanced to get the details with statistics data​. +- Volume module is enhanced to get the details with statistics data​. +- Added support for 4.0.x release of PowerFlex OS. + ## Version 1.3.0 - released on 28/06/22 - Added operations like Add/remove standby mdm, rename mdm, change mdm cluster ownership, switch mdm cluster mode, set performance profile, modify virtual IP interfaces and Get high level details of MDM cluster. - Added execution environment manifest file to support building an execution environment with ansible-builder. diff --git a/README.md b/README.md index b426735..4536d50 100644 --- a/README.md +++ b/README.md @@ -4,126 +4,50 @@ The Ansible Modules for Dell Technologies (Dell) PowerFlex allow Data Center and The capabilities of the Ansible modules are managing SDCs, volumes, snapshots, storage pools, SDSs, devices, protection domains, MDM cluster, and to gather high level facts from the storage system. The options available are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request. -## License -The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-powerflex/blob/1.3.0/LICENSE) for the full terms. 
Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/1.3.0/MODULE-LICENSE) for the full terms. - -## Support -The Ansible collection for PowerFlex is supported by Dell and is provided under the terms of the license attached to the collection. Please see the [LICENSE](#license) section for the full terms. Dell does not provide any support for the source code modifications. For any Ansible modules issues, questions or feedback, join the [Dell Automation Community](https://www.dell.com/community/Automation/bd-p/Automation). +## Table of contents + +* [Code of conduct](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/CODE_OF_CONDUCT.md) +* [Maintainer guide](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/MAINTAINER_GUIDE.md) +* [Committer guide](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/COMMITTER_GUIDE.md) +* [Contributing guide](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/CONTRIBUTING.md) +* [Branching strategy](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/BRANCHING.md) +* [List of adopters](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/ADOPTERS.md) +* [Maintainers](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/MAINTAINERS.md) +* [Support](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/SUPPORT.md) +* [License](#license) +* [Security](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/SECURITY.md) +* [Prerequisites](#prerequisites) +* [List of Ansible modules for Dell PowerFlex](#list-of-ansible-modules-for-dell-powerflex) +* [Installation and execution of Ansible modules for Dell PowerFlex](#installation-and-execution-of-ansible-modules-for-dell-powerflex) +* [Maintenance](#maintenance) +## License +The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. 
See [LICENSE](https://github.com/dell/ansible-powerflex/blob/1.4.0/LICENSE) for the full terms. Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/1.4.0/MODULE-LICENSE) for the full terms. ## Prerequisites | **Ansible Modules** | **PowerFlex/VxFlex OS Version** | **Red Hat Enterprise Linux**| **SDK version** | **Python version** | **Ansible** | |---------------------|-----------------------|------------------------------|-------|--------------------|--------------------------| -| v1.3.0 | 3.5,
3.6 |7.9,
8.2,
8.4,
8.5 | 1.4.0 | 3.8.x
3.9.x
3.10.x | 2.11
2.12
2.13 | +| v1.4.0 |3.5
3.6
4.0 |7.9,
8.4,
8.5 | 1.4.0 | 3.8.x
3.9.x
3.10.x | 2.11
2.12
2.13 | * Please follow PyPowerFlex installation instructions on [PyPowerFlex Documentation](https://github.com/dell/python-powerflex) ## Idempotency The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed. -## List of Ansible Modules for Dell PowerFlex - * [Info module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#info-module) - * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#snapshot-module) - * [SDC module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#sdc-module) - * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#storage-pool-module) - * [Volume module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#volume-module) - * [SDS module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#sds-module) - * [Device Module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#device-module) - * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#protection-domain-module) - * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/Product%20Guide.md#mdm-cluster-module) - -## Installation of SDK -* Install the python SDK named [PyPowerFlex](https://pypi.org/project/PyPowerFlex/). It can be installed using pip, based on appropriate python version. Execute this command: - - pip install PyPowerFlex -* Alternatively, Clone the repo "https://github.com/dell/python-powerflex" - using command: - - git clone https://github.com/dell/python-powerflex.git - * Go to the root directory of setup. - * Execute this command: - - pip install . 
-## Building Collections - * Use this command to build the collection from source code: - - ansible-galaxy collection build - - For more details on how to build a tar ball, please refer to: [Building the collection](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_distributing.html#building-your-collection-tarball) - -## Installing Collections - -#### Online Installation of Collections - * Use this command to install the latest collection hosted in [galaxy portal](https://galaxy.ansible.com/dellemc/powerflex): - - ansible-galaxy collection install dellemc.powerflex -p - -#### Offline Installation of Collections - - * Download the latest tar build from any of the available distribution channel [Ansible Galaxy](https://galaxy.ansible.com/dellemc/powerflex) /[Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/powerflex) and use this command to install the collection anywhere in your system: - - ansible-galaxy collection install dellemc-powerflex-1.3.0.tar.gz -p - - * Set the environment variable: - - export ANSIBLE_COLLECTIONS_PATHS=$ANSIBLE_COLLECTIONS_PATHS: - -## Using Collections - - * In order to use any Ansible module, ensure that the importing of proper FQCN(Fully Qualified Collection Name) must be embedded in the playbook. - This example can be referred to: - - collections: - - dellemc.powerflex - - * In order to use installed collection in a specific task use a proper FQCN(Fully Qualified Collection Name). Refer to this example: - - tasks: - - name: Get Volume details - dellemc.powerflex.volume - - * For generating Ansible documentation for a specific module, embed the FQCN before the module name. Refer to this example: - - ansible-doc dellemc.powerflex.volume - -## Running Ansible Modules -The Ansible server must be configured with Python library for PowerFlex to run the Ansible playbooks. 
The [Documents](https://github.com/dell/ansible-powerflex/blob/1.3.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules. - -## SSL Certificate Validation - -* Copy the CA certificate to the "/etc/pki/ca-trust/source/anchors" path of the host by any external means. -* Set the "REQUESTS_CA_BUNDLE" environment variable to the path of the SSL certificate using the command: - - export REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/<> -* Import the SSL certificate to host using the command: - - update-ca-trust extract -* If "TLS CA certificate bundle error" occurs, then follow these steps: - - cd /etc/pki/tls/certs/ - openssl x509 -in ca-bundle.crt -text -noout - -## Results -Each module returns the updated state and details of the entity, For example, if you are using the Volume module, all calls will return the updated details of the volume. Sample result is shown in each module's documentation. - -## Ansible Execution Environment -Ansible can also be installed in a container environment. Ansible Builder provides the ability to create reproducible, self-contained environments as container images that can be run as Ansible execution environments. 
-* Install the ansible builder package using: - - pip3 install ansible-builder -* Create the execution environment using: - - ansible-builder build --tag --container-runtime docker -* After the image is built, run the container using: - - docker run -it /bin/bash -* Verify collection installation using command: - - ansible-galaxy collection list -* The playbook can be run on the container using: - - docker run --rm -v $(pwd):/runner ansible-playbook info_test.yml +## List of Ansible modules for Dell PowerFlex + * [Info module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#info-module) + * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#snapshot-module) + * [SDC module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#sdc-module) + * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#storage-pool-module) + * [Volume module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#volume-module) + * [SDS module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#sds-module) + * [Device Module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#device-module) + * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#protection-domain-module) + * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/Product%20Guide.md#mdm-cluster-module) + +## Installation and execution of Ansible modules for Dell PowerFlex +The installation and execution steps of Ansible modules for Dell PowerFlex can be found [here](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/INSTALLATION.md) ## Maintenance Ansible Modules for Dell Technologies PowerFlex deprecation cycle is aligned with [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html). 
diff --git a/docs/ADOPTERS.md b/docs/ADOPTERS.md new file mode 100644 index 0000000..826b5cd --- /dev/null +++ b/docs/ADOPTERS.md @@ -0,0 +1,11 @@ + + +# List of adopters diff --git a/docs/BRANCHING.md b/docs/BRANCHING.md new file mode 100644 index 0000000..7a50455 --- /dev/null +++ b/docs/BRANCHING.md @@ -0,0 +1,32 @@ + + +# Branching strategy + +Ansible modules for Dell PowerFlex follows a scaled trunk branching strategy where short-lived branches are created off of the main branch. When coding is complete, the branch is merged back into main after being approved in a pull request code review. + +## Branch naming convention + +| Branch Type | Example | Comment | +|--------------|-----------------------------------|-------------------------------------------| +| master | master | | +| Release | release-1.0 | hotfix: release-1.1 patch: release-1.0.1 | +| Feature | feature-9-vol-support | "9" referring to GitHub issue ID | +| Bug Fix | bugfix-110-fix-duplicates-issue | "110" referring to GitHub issue ID | + + +## Steps for working on a release branch + +1. Fork the repository. +2. Create a branch off of the master branch. The branch name should follow [branch naming convention](#branch-naming-convention). +3. Make your changes and commit them to your branch. +4. If other code changes have merged into the upstream master branch, perform a rebase of those changes into your branch. +5. Open a [pull request](https://github.com/dell/ansible-powerflex/pulls) between your branch and the upstream master branch. +6. Once your pull request has merged, your branch can be deleted. 
diff --git a/docs/CODE_OF_CONDUCT.md b/docs/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..cfc4993 --- /dev/null +++ b/docs/CODE_OF_CONDUCT.md @@ -0,0 +1,137 @@ + + +# Code of conduct - contributor covenant + +## Our pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, 
threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at ansible.team@dell.com +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. 
This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary ban + +**Community impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent ban + +**Community impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. \ No newline at end of file diff --git a/docs/COMMITTER_GUIDE.md b/docs/COMMITTER_GUIDE.md new file mode 100644 index 0000000..8af0752 --- /dev/null +++ b/docs/COMMITTER_GUIDE.md @@ -0,0 +1,49 @@ + + +# Committer guidelines + +These are the guidelines for people with commit privileges on the GitHub repository. Committers act as members of the Core Team and not necessarily employees of Dell. 
+ +These guidelines apply to everyone and as Committers you have been given access to commit changes because you exhibit good judgment and have demonstrated your commitment to the vision of the project. We trust that you will use these privileges wisely and not abuse them. + +If these privileges are abused in any way and the quality of the project is compromised, our trust will be diminished and you may be asked to not commit or lose these privileges altogether. + +## General rules + +### Don't + +* Break the build. +* Commit directly. +* Compromise backward compatibility. +* Disrespect your Community Team members. Help them grow. +* Think it is someone else's job to test your code. Write tests for all the code you produce. +* Forget to keep things simple. +* Create technical debt. Fix-in-place and make it the highest priority above everything else. + +### Do + +* Keep it simple. +* Good work, your best every time. +* Keep the design of your software clean and maintainable. +* Squash your commits, avoid merges. +* Be active. Committers that are not active may have their permissions suspended. +* Write tests for all your deliverables. +* Automate everything. +* Maintain a high code coverage. +* Keep an open communication with other Committers. +* Ask questions. +* Document your contributions and remember to keep it simple. + +## People + +| Name | GitHub ID | Nickname | +|-------|-------------|------------| +| | | | diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 0000000..81f1a9f --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,173 @@ + + +# How to contribute + +Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. 
To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.4.0/CODE_OF_CONDUCT.md). + +## Table of contents + +* [Become a contributor](#Become-a-contributor) +* [Submitting issues](#Submitting-issues) +* [Triage issues](#Triage-issues) +* [Your first contribution](#Your-first-contribution) +* [Branching](#Branching) +* [Signing your commits](#Signing-your-commits) +* [Pull requests](#Pull-requests) +* [Code reviews](#Code-reviews) +* [TODOs in the code](#TODOs-in-the-code) + +## Become a contributor + +You can contribute to this project in several ways. Here are some examples: + +* Contribute to the Ansible modules for Dell PowerFlex documentation and codebase. +* Report and triage bugs. +* Feature requests. +* Write technical documentation and blog posts, for users and contributors. +* Help others by answering questions about this project. + +## Submitting issues + +All issues related to Ansible modules for Dell PowerFlex, regardless of the service/repository the issue belongs to (see table above), should be submitted [here](https://github.com/dell/ansible-powerflex/issues). Issues will be triaged and labels will be used to indicate the type of issue. This section outlines the types of issues that can be submitted. + +### Report bugs + +We aim to track and document everything related to Ansible modules for Dell PowerFlex via the Issues page. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community driven process. + +Before submitting a new issue, make sure someone hasn't already reported the problem. Look through the [existing issues](https://github.com/dell/ansible-powerflex/issues) for similar issues. + +Report a bug by submitting a [bug report](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Fbug%2C+needs-triage&template=bug_report.md&title=%5BBUG%5D%3A). 
Make sure that you provide as much information as possible on how to reproduce the bug. + +When opening a Bug please include this information to help with debugging: + +1. Version of relevant software: this software, Ansible, Python, SDK, etc. +2. Details of the issue explaining the problem: what, when, where +3. The expected outcome that was not met (if any) +4. Supporting troubleshooting information. __Note: Do not provide private company information that could compromise your company's security.__ + +An Issue __must__ be created before submitting any pull request. Any pull request that is created should be linked to an Issue. + +### Feature request + +If you have an idea of how to improve this project, submit a [feature request](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Ffeature-request%2C+needs-triage&template=feature_request.md&title=%5BFEATURE%5D%3A). + +### Answering questions + +If you have a question and you can't find the answer in the documentation or issues, the next step is to submit a [question.](https://github.com/dell/ansible-powerflex/issues/new?labels=type%2Fquestion&template=ask-a-question.md&title=%5BQUESTION%5D%3A) + +We'd love your help answering questions being asked by other Ansible modules for Dell PowerFlex users. + +## Triage issues + +Triage helps ensure that issues resolve quickly by: + +* Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +* Giving a contributor the information they need before they commit to resolving an issue. +* Lowering the issue count by preventing duplicate issues. +* Streamlining the development process by preventing duplicate discussions. + +If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell PowerFlex community will thank you for saving them time by spending some of yours. 
+ +Read more about the ways you can [Triage issues](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/ISSUE_TRIAGE.md). + +## Your first contribution + +Unsure where to begin contributing? Start by browsing issues labeled `beginner friendly` or `help wanted`. + +* [Beginner-friendly](https://github.com/dell/ansible-powerflex/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22) issues are generally straightforward to complete. +* [Help wanted](https://github.com/dell/ansible-powerflex/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) issues are problems we would like the community to help us with regardless of complexity. + +When you're ready to contribute, it's time to create a pull request. + +## Branching + +* [Branching Strategy for Ansible modules for Dell PowerFlex](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/BRANCHING.md) + +## Signing your commits + +We require that developers sign off their commits to certify that they have permission to contribute the code in a pull request. This way of certifying is commonly known as the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). We encourage all contributors to read the DCO text before signing a commit and making contributions. + +GitHub will prevent a pull request from being merged if there are any unsigned commits. + +### Signing a commit + +GPG (GNU Privacy Guard) will be used to sign commits. Follow the instructions [here](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-commits) to create a GPG key and configure your GitHub account to use that key. + +Make sure you have your user name and e-mail set. This will be required for your signed commit to be properly verified. 
Check these references: + +* Setting up your github user name [reference](https://help.github.com/articles/setting-your-username-in-git/) +* Setting up your e-mail address [reference](https://help.github.com/articles/setting-your-commit-email-address-in-git/) + +Once Git and your GitHub account have been properly configured, you can add the -S flag to the git commits: + +```console +$ git commit -S -m your commit message +# Creates a signed commit +``` + +### Commit message format + +Ansible modules for Dell PowerFlex use the guidelines for commit messages outlined in [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/) + +## Pull requests + +If this is your first time contributing to an open-source project on GitHub, make sure you read about [Creating a pull request](https://help.github.com/en/articles/creating-a-pull-request). + +A pull request must always link to at least one GitHub issue. If that is not the case, create a GitHub issue and link it. + +To increase the chance of having your pull request accepted, make sure your pull request follows these guidelines: + +* Title and description match the implementation. +* Commits within the pull request follow the formatting guidelines. +* The pull request closes one related issue. +* The pull request contains necessary tests that verify the intended behavior. +* If your pull request has conflicts, rebase your branch onto the main branch. + +If the pull request fixes a bug: + +* The pull request description must include `Fixes #`. +* To avoid regressions, the pull request should include tests that replicate the fixed bug. + +The team _squashes_ all commits into one when we accept a pull request. The title of the pull request becomes the subject line of the squashed commit message. We still encourage contributors to write informative commit messages, as they become a part of the Git commit body. + +We use the pull request title when we generate change logs for releases. 
As such, we strive to make the title as informative as possible. + +Make sure that the title for your pull request uses the same format as the subject line in the commit message. + +### Quality gates for pull requests + +GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that gets checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](https://github.com/dell/ansible-powerflex/blob/1.4.0/SUPPORT.md). + +#### Code sanitization + +[GitHub action](https://github.com/dell/ansible-powerflex/actions/workflows/ansible-test.yml) that analyzes source code to flag ansible sanity errors and runs Unit tests. + +## Code reviews + +All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. + +A pull request must satisfy the following for it to be merged: + +* A pull request will require at least 2 maintainer approvals. +* Maintainers must perform a review to ensure the changes adhere to guidelines laid out in this document. +* If any commits are made after the PR has been approved, the PR approval will automatically be removed and the above process must happen again. + +## Code style + +Ensure the added code has the required documentation, examples and unit tests. + +### Sanity + +Run ansible-test sanity --docker default on your code to ensure sanity. Ensure the code does not have any Andersson script violations and does not break any existing unit test workflows. + +### TODOs in the code + +We don't like TODOs in the code or documentation. 
It is really best if you sort out all issues you can see with the changes before we check the changes in. diff --git a/docs/INSTALLATION.md b/docs/INSTALLATION.md new file mode 100644 index 0000000..741a370 --- /dev/null +++ b/docs/INSTALLATION.md @@ -0,0 +1,106 @@ + + +# Installation and execution of Ansible modules for Dell PowerFlex + +## Installation of SDK +* Install the python SDK named [PyPowerFlex](https://pypi.org/project/PyPowerFlex/). It can be installed using pip, based on appropriate python version. Execute this command: + + pip install PyPowerFlex +* Alternatively, Clone the repo "https://github.com/dell/python-powerflex" + using command: + + git clone https://github.com/dell/python-powerflex.git + * Go to the root directory of setup. + * Execute this command: + + pip install . + +## Building collections + * Use this command to build the collection from source code: + + ansible-galaxy collection build + + For more details on how to build a tar ball, please refer to: [Building the collection](https://docs.ansible.com/ansible/latest/dev_guide/developing_collections_distributing.html#building-your-collection-tarball) + +## Installing collections + +#### Online installation of collections + * Use this command to install the latest collection hosted in [galaxy portal](https://galaxy.ansible.com/dellemc/powerflex): + + ansible-galaxy collection install dellemc.powerflex -p + +#### Offline installation of collections + + * Download the latest tar build from any of the available distribution channel [Ansible Galaxy](https://galaxy.ansible.com/dellemc/powerflex) /[Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/powerflex) and use this command to install the collection anywhere in your system: + + ansible-galaxy collection install dellemc-powerflex-1.4.0.tar.gz -p + + * Set the environment variable: + + export ANSIBLE_COLLECTIONS_PATHS=$ANSIBLE_COLLECTIONS_PATHS: + +## Using collections + + * In order to use any Ansible 
module, ensure that the importing of proper FQCN (Fully Qualified Collection Name) must be embedded in the playbook. + This example can be referred to: + + collections: + - dellemc.powerflex + + * In order to use installed collection in a specific task use a proper FQCN (Fully Qualified Collection Name). Refer to this example: + + tasks: + - name: Get Volume details + dellemc.powerflex.volume + + * For generating Ansible documentation for a specific module, embed the FQCN before the module name. Refer to this example: + + ansible-doc dellemc.powerflex.volume + + +## Ansible modules execution + +The Ansible server must be configured with Python library for PowerFlex to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules. + +## SSL certificate validation + +* Copy the CA certificate to the "/etc/pki/ca-trust/source/anchors" path of the host by any external means. +* Set the "REQUESTS_CA_BUNDLE" environment variable to the path of the SSL certificate using the command: + + export REQUESTS_CA_BUNDLE=/etc/pki/ca-trust/source/anchors/<> +* Import the SSL certificate to host using the command: + + update-ca-trust extract +* If "TLS CA certificate bundle error" occurs, then follow these steps: + + cd /etc/pki/tls/certs/ + openssl x509 -in ca-bundle.crt -text -noout + +## Results +Each module returns the updated state and details of the entity, For example, if you are using the Volume module, all calls will return the updated details of the volume. Sample result is shown in each module's documentation. + +## Ansible execution environment +Ansible can also be installed in a container environment. 
Ansible Builder provides the ability to create reproducible, self-contained environments as container images that can be run as Ansible execution environments. +* Install the ansible builder package using: + + pip3 install ansible-builder +* Ensure the execution-environment.yml is at the root of collection and create the execution environment using: + + ansible-builder build --tag --container-runtime docker +* After the image is built, run the container using: + + docker run -it /bin/bash +* Verify collection installation using command: + + ansible-galaxy collection list +* The playbook can be run on the container using: + + docker run --rm -v $(pwd):/runner ansible-playbook info_test.yml diff --git a/docs/ISSUE_TRIAGE.md b/docs/ISSUE_TRIAGE.md new file mode 100644 index 0000000..5d9b220 --- /dev/null +++ b/docs/ISSUE_TRIAGE.md @@ -0,0 +1,308 @@ + + +# Triage issues + +The main goal of issue triage is to categorize all incoming issues and make sure each issue has all basic information needed for anyone else to understand and be able to start working on it. + +> **Note:** This information is for project Maintainers, Owners, and Admins. If you are a Contributor, then you will not be able to perform most of the tasks in this topic. + +The core maintainers of this project are responsible for categorizing all incoming issues and delegating any critical or important issue to other maintainers. Triage provides an important way to contribute to an open source project. + +Triage helps ensure issues resolve quickly by: + +- Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. +- Giving a contributor the information they need before they commit to resolving an issue. +- Lowering the issue count by preventing duplicate issues. +- Streamlining the development process by preventing duplicate discussions. 
+ +If you don't have the knowledge or time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. + +## 1. Find issues that need triage + +The easiest way to find issues that haven't been triaged is to search for issues with the `needs-triage` label. + +## 2. Ensure the issue contains basic information + +Make sure that the issue's author provided the standard issue information. This project utilizes GitHub issue templates to guide contributors to provide standard information that must be included for each type of template or type of issue. + +### Standard issue information that must be included + +This section describes the various issue templates and the expected content. + +#### Bug reports + +Should explain what happened, what was expected and how to reproduce it together with any additional information that may help giving a complete picture of what happened such as screenshots, output and any environment related information that's applicable and/or may be related to the reported problem: + + - Ansible Version: [e.g. 2.13] + - Python Version [e.g. 3.10] + - Ansible modules for Dell PowerFlex Version: [e.g. 1.4.0] + - PowerFlex SDK version: [e.g. PyPowerFlex 1.5.0] + - Any other additional information... + +#### Feature requests + +Should explain what feature the author wants to be added and why that is needed. + +#### Ask a question requests + +In general, if the issue description and title is perceived as a question no more information is needed. + +### Good practices + +To make it easier for everyone to understand and find issues they're searching for it's suggested as a general rule of thumb to: + +- Make sure that issue titles are named to explain the subject of the issue, have correct spelling and don't include irrelevant information and/or sensitive information. +- Make sure that issue descriptions don't include irrelevant information. 
+- Make sure that issues do not contain sensitive information. +- Make sure that issues have all relevant fields filled in. +- Do your best effort to change title and description or request suggested changes by adding a comment. + +> **Note:** Above rules are applicable to both new and existing issues. + +### Dealing with missing information + +Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. Label issue with `triage/needs-information`. + +If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. Label issue with `triage/needs-information`. + +If the author does not respond to the requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +If you receive a notification with additional information provided but you are not anymore on issue triage and you feel you do not have time to handle it, you should delegate it to the current person on issue triage. + +## 3. Categorizing an issue + +### Duplicate issues + +Make sure it's not a duplicate by searching existing issues using related terms from the issue title and description. If you think you know there is an existing issue, but can't find it, please reach out to one of the maintainers and ask for help. If you identify that the issue is a duplicate of an existing issue: + +1. Add a comment `duplicate of #` +2. Add the `triage/duplicate` label + +### Bug reports + +If it's not perfectly clear that it's an actual bug, quickly try to reproduce it. + +**It's a bug/it can be reproduced:** + +1. 
Add a comment describing detailed steps for how to reproduce it, if applicable. +2. If you know that maintainers wont be able to put any resources into it for some time then label the issue with `help wanted` and optionally `beginner friendly` together with pointers on which code to update to fix the bug. This should signal to the community that we would appreciate any help we can get to resolve this. +3. Move on to [prioritizing the issue](#4-prioritization-of-issues). + +**It can't be reproduced:** + +1. Either [ask for more information](#2-ensure-the-issue-contains-basic-information) needed to investigate it more thoroughly. Provide details in a comment. +2. Either [delegate further investigations](#investigation-of-issues) to someone else. Provide details in a comment. + +**It works as intended/by design:** + +1. Kindly and politely add a comment explaining briefly why we think it works as intended and close the issue. +2. Label the issue `triage/works-as-intended`. +3. Remove the `needs-triage` label. + +**It does not work as intended/by design:** + +### Feature requests + +1. If the feature request does not align with the product vision, add a comment indicating so, remove the `needs-triage` label and close the issue +2. Otherwise, move on to [prioritizing the issue](#4-prioritization-of-issues). Assign the appropriate priority label to the issue, add the appropriate comments to the issue, and remove the `needs-triage` label. + +## 4. Prioritization of issues + +In general bugs and feature request issues should be labeled with a priority. + +This is the most difficult thing with triaging issues since it requires a lot of knowledge, context and experience before being able to think of and start feel comfortable adding a certain priority label. + +The key here is asking for help and discuss issues to understand how more experienced project members think and reason. 
By doing that you learn more and eventually be more and more comfortable with prioritizing issues. + +In case there is an uncertainty around the prioritization of an issue, please ask the maintainers for help. + +| Label | Description | +| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | +| `priority/critical` | Highest priority. Must be actively worked on as someone's top priority right now. | +| `priority/high` | Must be worked on soon, ideally in time for the next release. | +| `priority/low` | Lowest priority. Possibly useful, but not yet enough interest in it. | + +### Critical priority + +1. If an issue has been categorized and any of this criteria apply, the issue should be labeled as critical and must be actively worked on as someone's top priority right now. + + - Results in any data loss + - Critical security or performance issues + - Problem that makes a feature unusable + - Multiple users experience a severe problem affecting their business, users etc. + +2. Label the issue `priority/critical`. +3. Escalate the problem to the maintainers. +4. Assign or ask a maintainer for help assigning someone to make this issue their top priority right now. +5. Add the issue to the next upcoming release milestone. + +### High priority + +1. Label the issue `priority/high`. +2. Add the issue to the next upcoming release milestone. +3. Prioritize it or assign someone to work on it now or very soon. +4. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +### Low priority + +1. If the issue is deemed possibly useful but a low priority label the issue `priority/low`. +2. The amount of interest in the issue will determine if the priority changes to be higher. +3. Consider requesting [help from the community](#5-requesting-help-from-the-community). + +## 5. 
Requesting help from the community + +Depending on the issue and/or priority, it's always a good idea to consider signalling to the community that help from community is appreciated and needed in case an issue is not prioritized to be worked on by maintainers. Use your best judgement. In general, requesting help from the community means that a contribution has a good chance of getting accepted and merged. + +In many cases the issue author or community as a whole is more suitable to contribute changes since they're experts in their domain. It's also quite common that someone has tried to get something to work using the documentation without success and made an effort to get it to work and/or reached out to the community to get the missing information. + +1. Kindly and politely add a comment to signal to users subscribed to updates of the issue. + - Explain that the issue would be nice to get resolved, but it isn't prioritized to be worked on by maintainers for the foreseeable future. + - If possible or applicable, try to help contributors get started by adding pointers and references to what code/files need to be changed and/or ideas of a good way to solve/implement the issue. +2. Label the issue with `help wanted`. +3. If applicable, label the issue with `beginner friendly` to denote that the issue is suitable for a beginner to work on. + +## Investigation of issues + +When an issue has all basic information provided, but the reported problem cannot be reproduced at a first glance, the issue is labeled `triage/needs-information`. Depending on the perceived severity and/or number of [upvotes](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments), the investigation will either be delegated to another maintainer for further investigation or put on hold until someone else (maintainer or contributor) picks it up and eventually starts investigating it. 
+ +Even if you don't have the time or knowledge to investigate an issue we highly recommend that you [upvote](https://help.github.com/en/articles/about-conversations-on-github#reacting-to-ideas-in-comments) the issue if you happen to have the same problem. If you have further details that may help investigating the issue please provide as much information as possible. + +## External pull requests + +Part of issue triage should also be triaging of external PRs. Main goal should be to make sure PRs from external contributors have an owner/reviewer and are not forgotten. + +1. Check new external PRs which do not have a reviewer. +1. Check if there is a link to an existing issue. +1. If not and you know which issue it is solving, add the link yourself, otherwise ask the author to link the issue or create one. +1. Assign a reviewer based on who was handling the linked issue or what code or feature does the PR touches (look at who was the last to make changes there if all else fails). + +## GitHub issue management workflow + +This section describes the triage workflow for new GitGHub issues that get created. + +### GitHub Issue: Bug + +This workflow starts off with a GitHub issue of type bug being created. + +1. Collaborator or maintainer creates a GitHub bug using the appropriate GitHub issue template +2. By default a bug will be created with the `type/bug` and `needs-triage` labels + +The following flow chart outlines the triage process for bugs. + + +``` + +--------------------------+ + | New bug issue opened/more| + | information added | + +-------------|------------+ + | + | + +----------------------------------+ NO +--------------|-------------+ + | label: triage/needs-information --------- All required information | + | | | contained in issue? | + +-----------------------------|----+ +--------------|-------------+ + | | YES + | | + +--------------------------+ | +---------------------+ YES +---------------------------------------+ + |label: | | | Dupicate Issue? 
------- Comment `Duplicate of #` + |triage/needs-investigation| | NO | | | Remove needs-triage label | + +------|-------------------+ | +----------|----------+ | label: triage/duplicate | + | | | NO +-----------------|---------------------+ + YES | | | | + | +---------------|----+ NO +------------|------------+ | + | |Needs investigation?|---------- Can it be reproduced? | | + |------- | +------------|------------+ | + +--------------------+ | YES | + | +----------|----------+ + +-------------------------+ +------------|------------+ | Close Issue | + | Add release-found label |------------------ Works as intended? | | | + | label: release-found/* | NO | | +----------|----------+ + +------------|------------+ +------------|------------+ | + | | | + | | YES | + +-----------------------------+ +----------------|----------------+ | + | Add area label | | Add comment | | + | label: area/* | | Remove needs-triage label ------------------| + +------------|----------------+ | label: triage/works-as-intended | + | +---------------------------------+ + | + +------------|-------------+ +----------+ + | Add priority label | | Done ---------------------------------------- + | label: priority/* | +----|-----+ | + +------------|-------------+ |NO | + | | +------------------|------------------+ + +------------|-------------+ +----|----------------+ YES | Add details to issue | + | ------------ Signal Community? ---------- label: help wanted | + |Remove needs-triage label | | | | label: beginner friendly (optional)| + +--------------------------+ +---------------------+ +-------------------------------------+ + +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +### GitHub issue: feature request + +This workflow starts off with a GitHub issue of type feature request being created. + +1. 
Collaborator or maintainer creates a GitHub feature request using the appropriate GitHub issue template +2. By default a feature request will be created with the `type/feature-request` and `needs-triage` labels + +This flow chart outlines the triage process for feature requests. + + +``` + +---------------------------------+ + |New feature request issue opened/| + |more information added | + +----------------|----------------+ + | + | + +---------------------------------+ NO +-------------|------------+ + | label: triage/needs-information ---------- All required information | + | | | contained in issue? | + +---------------------------------+ +-------------|------------+ + | + | + +---------------------------------------+ | + |Comment `Duplicate of #` | YES +----------|----------+ + |Remove needs-triage label ------- Duplicate issue? | + |label: triage/duplicate | | | + +-----|---------------------------------+ +-----------|---------+ + | |NO + | +-------------------------+ NO +-----------------------------+ + | |Add comment |-------- Does feature request align | + | |Remove needs-triage label| | with product vision? 
| + | +------|------------------+ +--------------|--------------+ + | | | YES + | | +-----------------|----------------+ + | | |Change feature-request to feature | + | | |Remove label: type/feature-request| + | | |Add label: type/feature | + | | +-----------------|----------------+ + | | | + | | +--------------|--------------+ + | | | Add area label | + | | | label: area/* | + | | +--------------|--------------+ + | | | + +-|---------|---+ +--------+ +--------------|--------------+ + | Close issue | | Done --------- Add priority label | + | | | | | label: priority/* | + +---------------+ +--------+ +-----------------------------+ +``` + +If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided. + +In some cases you may receive a request you do not wish to accept. Perhaps the request doesn't align with the project scope or vision. It is important to tactfully handle contributions that don't meet the project standards. + +1. Acknowledge the person behind the contribution and thank them for their interest and contribution +2. Explain why it didn't fit into the scope of the project or vision +3. Don't leave an unwanted contributions open. 
Immediately close the contribution you do not wish to accept diff --git a/docs/MAINTAINERS.md b/docs/MAINTAINERS.md new file mode 100644 index 0000000..72fac72 --- /dev/null +++ b/docs/MAINTAINERS.md @@ -0,0 +1,17 @@ + + +# Maintainers + +* @ananthu-kuttattu +* @bhavneet-sharma +* @jennifer-john +* @pavan-mudunuri +* @trisha-datta diff --git a/docs/MAINTAINER_GUIDE.md b/docs/MAINTAINER_GUIDE.md new file mode 100644 index 0000000..45191a5 --- /dev/null +++ b/docs/MAINTAINER_GUIDE.md @@ -0,0 +1,38 @@ + + +# Maintainer guidelines + +As a Maintainer of this project you have the responsibility of keeping true to the vision of the project with unprecedented quality. Being part of this group is a privilege that requires dedication and time to attend to the daily activities that are associated with the maintenance of this project. + +## Becoming a maintainer + +Most Maintainers started as Contributors that have demonstrated their commitment to the success of the project. Contributors wishing to become Maintainers, must demonstrate commitment to the success of the project by contributing code, reviewing others' work, and triaging issues on a regular basis for at least three months. + +The contributions alone don't make you a Maintainer. You need to earn the trust of the current Maintainers and other project Contributors, that your decisions and actions are in the best interest of the project. + +Periodically, the existing Maintainers curate a list of Contributors who have shown regular activity on the project over the prior months. It is from this list that Maintainer candidates are selected. + +After a candidate is selected, the existing Maintainers discuss the candidate over the next 5 business days, provide feedback, and vote. At least 75% of the current Maintainers must vote in the affirmative for a candidate to be moved to the role of Maintainer. 
+ +If a candidate is approved, a Maintainer contacts the candidate to invite them to open a pull request that adds the contributor to the MAINTAINERS file. The candidate becomes a Maintainer once the pull request is merged. + +## Maintainer policies + +* Lead by example +* Follow the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.4.0/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-powerflex/blob/1.4.0/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-powerflex/blob/1.4.0/COMMITTER_GUIDE.md) guides +* Promote a friendly and collaborative environment within our community +* Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests +* Criticize code, not people. Ideally, tell the contributor a better way to do what they need. +* Clearly mark optional suggestions as such. Best practice, start your comment with *At your option: …* + +## Project decision making + +All project decisions should contribute to successfully executing on the project roadmap. Project milestones are established for each release. \ No newline at end of file diff --git a/docs/Product Guide.md b/docs/Product Guide.md index 2720b27..5839b5b 100644 --- a/docs/Product Guide.md +++ b/docs/Product Guide.md @@ -1,5 +1,5 @@ # Ansible Modules for Dell Technologies PowerFlex -## Product Guide 1.3.0 +## Product Guide 1.4.0 © 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell, and other trademarks are trademarks of Dell Inc. or its subsidiaries. Other trademarks may be trademarks of their respective owners. -------------- @@ -707,6 +707,14 @@ Gathering information about Dell PowerFlex Choices Description + + gather_subset + list
elements: str + + +
  • vol
  • storage_pool
  • protection_domain
  • sdc
  • sds
  • snapshot_policy
  • device
+
List of string variables to specify the Powerflex storage system entities for which information is required.
Volumes - vol.
Storage pools - storage_pool.
Protection domains - protection_domain.
SDCs - sdc.
SDSs - sds.
Snapshot policies - snapshot_policy.
Devices - device. + filters list
elements: dict @@ -726,45 +734,37 @@ Gathering information about Dell PowerFlex   - filter_value + filter_operator str True - -
Value of the filter key. +
  • equal
+
Operation to be performed on filter key.   - filter_operator + filter_value str True -
  • equal
-
Operation to be performed on filter key. + +
Value of the filter key. - timeout - int + gateway_host + str + True - 120 -
Time after which connection will get terminated.
It is to be mentioned in seconds. +
IP or FQDN of the PowerFlex gateway host. - verifycert - bool - + username + str True -
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified. - - - port - int - - 443 -
Port number through which communication happens with PowerFlex gateway host. +
The username of the PowerFlex gateway host. password @@ -775,30 +775,30 @@ Gathering information about Dell PowerFlex
The password of the PowerFlex gateway host. - gather_subset - list
elements: str + verifycert + bool + True -
  • vol
  • storage_pool
  • protection_domain
  • sdc
  • sds
  • snapshot_policy
  • device
-
List of string variables to specify the Powerflex storage system entities for which information is required.
Volumes - vol.
Storage pools - storage_pool.
Protection domains - protection_domain.
SDCs - sdc.
SDSs - sds.
Snapshot policies - snapshot_policy.
Devices - device. +
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified. - username - str - True + port + int + 443 -
The username of the PowerFlex gateway host. +
Port number through which communication happens with PowerFlex gateway host. - gateway_host - str - True + timeout + int + 120 -
IP or FQDN of the PowerFlex gateway host. +
Time after which connection will get terminated.
It is to be mentioned in seconds. - + ### Notes * The check_mode is supported. @@ -836,341 +836,548 @@ Gathering information about Dell PowerFlex ``` ### Return Values - + - + - - - + + + - + + + + + + + - + - + - + - + - - - - - - - + + + + + + + + - + + + + + + + + - + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + - + - - - - - - - + - + - + + + + + + + + + + + + + + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - - - - - - + + + + + + + + + + + + + + + - + - + - + - + + + + + + + + - - - - - - - - - - - - - + - + - + - + - + - + - + - + - + - + - + - + - - + + - + - - + + - + - - + + - + - + + + + + + + - - + + - + - + - + - + + + + + + + - - + + - + - + - + - - + + - + - + - + - + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - - + + - + - + + + + + + + - - + + - + - - + + - + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - + + - + - + - + - + - + - - + + - + - + - + - - + + - + - + - + - - + + - + - + + + + + + + + + + + + + + + + + - - + + - + - - + + - + -
KeyKey Type Returned Description
Snapshot_Policies list
API_Version str always Details of snapshot policies. API version of PowerFlex API Gateway.
Array_Details dict always System entities of PowerFlex storage array.
  id addressSpaceUsage str success snapshot policy id. Address space usage.
  name authenticationMethod str success snapshot policy name. Authentication method.
Devices list always Details of devices.
  capacityAlertCriticalThresholdPercent int success Capacity alert critical threshold percentage.
  id capacityAlertHighThresholdPercent int success Capacity alert high threshold percentage.
  capacityTimeLeftInDays str success device id. Capacity time left in days.
  cliPasswordAllowed bool success CLI password allowed.
  daysInstalled int success Days installed.
  name defragmentationEnabled bool success Defragmentation enabled.
  enterpriseFeaturesEnabled bool success Enterprise features enabled.
  id str success device name. The ID of the system.
SDSs list always Details of storage data servers.
  id installId str success storage data server id. Installation ID.
  isInitialLicense bool success Initial license.
  lastUpgradeTime int success Last upgrade time.
  name managementClientSecureCommunicationEnabled bool success Management client secure communication enabled.
  maxCapacityInGb dict success Maximum capacity in GB.
  mdmCluster dict success MDM cluster details.
  mdmExternalPort int success MDM external port.
  mdmManagementPort int success MDM management port.
  mdmSecurityPolicy str success storage data server name. MDM security policy.
Storage_Pools list always Details of storage pools.
  showGuid bool success Show guid.
  swid str success SWID.
  id systemVersionName str success storage pool id. System version and name.
  name tlsVersion str success storage pool name. TLS version.
  upgradeState str success Upgrade state.
API_Version str always API version of PowerFlex API Gateway.
changed bool always Whether or not the resource has changed.
Volumes Devices list always Details of volumes. Details of devices.
  id id str success volume id. device id.
  name name str success volume name. device name.
Protection_Domains Protection_Domains list always Details of all protection domains.
  id id str success protection domain id.
  name name str success protection domain name.
SDCs SDCs list always Details of storage data clients.
  id id str success storage data client id.
  name name str success storage data client name.
Array_Details dict SDSs list always System entities of PowerFlex storage array. Details of storage data servers.
  isInitialLicense bool id str success Initial license. storage data server id.
  daysInstalled int name str success Days installed. storage data server name.
Snapshot_Policies list always Details of snapshot policies.
  mdmManagementPort int id str success MDM management port. snapshot policy id.
  authenticationMethod name str success Authentication method. snapshot policy name.
Storage_Pools list always Details of storage pools.
  cliPasswordAllowed bool id str success CLI password allowed. ID of the storage pool under protection domain.
  tlsVersion mediaType str success TLS version. Type of devices in the storage pool.
  managementClientSecureCommunicationEnabled bool name str success Management client secure communication enabled. Name of the storage pool under protection domain.
  mdmSecurityPolicy protectionDomainId str success MDM security policy. ID of the protection domain in which pool resides.
  systemVersionName protectionDomainName str success System version and name. Name of the protection domain in which pool resides.
  showGuid statistics complex success Statistics details of the storage pool.
   capacityInUseInKb str success Total capacity of the storage pool.
   deviceIds list success Device Ids of the storage pool.
   unusedCapacityInKb str success Unused capacity of the storage pool.
  useRfcache bool success Show guid. Enable/Disable RFcache on a specific storage pool.
  maxCapacityInGb dict useRmcache bool success Maximum capacity in GB. Enable/Disable RMcache on a specific storage pool.
Volumes list always Details of volumes.
  mdmCluster dict id str success MDM cluster details. The ID of the volume.
  capacityAlertHighThresholdPercent int mappedSdcInfo complex success Capacity alert high threshold percentage. The details of the mapped SDC.
   accessMode str success mapping access mode for the specified volume.
   limitBwInMbps int success Bandwidth limit for the SDC.
   limitIops int success IOPS limit for the SDC.
   sdcId str success ID of the SDC.
   sdcIp str success IP of the SDC.
   sdcName str success Name of the SDC.
  swid name str success SWID. Name of the volume.
  enterpriseFeaturesEnabled bool protectionDomainId str success Enterprise features enabled. ID of the protection domain in which volume resides.
  installId protectionDomainName str success installation Id. Name of the protection domain in which volume resides.
  lastUpgradeTime sizeInGb int success Last upgrade time. Size of the volume in Gb.
  id str sizeInKb int success The ID of the system. Size of the volume in Kb.
  upgradeState snapshotPolicyId str success Upgrade state. ID of the snapshot policy associated with volume.
  capacityAlertCriticalThresholdPercent int snapshotPolicyName str success Capacity alert critical threshold percentage. Name of the snapshot policy associated with volume.
  addressSpaceUsage snapshotsList str success Address space usage. List of snapshots associated with the volume.
  capacityTimeLeftInDays str statistics complex success Capacity time left in days. Statistics details of the volume.
   numOfChildVolumes int success Number of child volumes.
   numOfMappedSdcs int success Number of mapped Sdcs of the volume.
  mdmExternalPort int storagePoolId str success MDM external port. ID of the storage pool in which volume resides.
  defragmentationEnabled bool storagePoolName str success Defragmentation enabled. Name of the storage pool in which volume resides.
+ + changed + bool + always + Whether or not the resource has changed. + + ### Authors * Arindam Datta (@dattaarindam) @@ -4023,13 +4230,21 @@ Managing Dell PowerFlex storage pool Choices Description + + storage_pool_name + str + + + +
The name of the storage pool.
If more than one storage pool is found with the same name then protection domain id/name is required to perform the task.
Mutually exclusive with storage_pool_id. + - timeout - int + storage_pool_id + str - 120 -
Time after which connection will get terminated.
It is to be mentioned in seconds. + +
The id of the storage pool.
It is auto generated, hence should not be provided during creation of a storage pool.
Mutually exclusive with storage_pool_name. protection_domain_name @@ -4040,12 +4255,12 @@ Managing Dell PowerFlex storage pool
The name of the protection domain.
During creation of a pool, either protection domain name or id must be mentioned.
Mutually exclusive with protection_domain_id. - port - int + protection_domain_id + str - 443 -
Port number through which communication happens with PowerFlex gateway host. + +
The id of the protection domain.
During creation of a pool, either protection domain name or id must be mentioned.
Mutually exclusive with protection_domain_name. media_type @@ -4054,14 +4269,6 @@ Managing Dell PowerFlex storage pool
  • HDD
  • SSD
  • TRANSITIONAL

Type of devices in the storage pool. - - - use_rmcache - bool - - - -
Enable/Disable RMcache on a specific storage pool. storage_pool_new_name @@ -4070,14 +4277,6 @@ Managing Dell PowerFlex storage pool
New name for the storage pool can be provided.
This parameter is used for renaming the storage pool. - - - gateway_host - str - True - - -
IP or FQDN of the PowerFlex gateway host. use_rfcache @@ -4088,12 +4287,12 @@ Managing Dell PowerFlex storage pool
Enable/Disable RFcache on a specific storage pool. - protection_domain_id - str + use_rmcache + bool -
The id of the protection domain.
During creation of a pool, either protection domain name or id must be mentioned.
Mutually exclusive with protection_domain_name. +
Enable/Disable RMcache on a specific storage pool. state @@ -4104,20 +4303,12 @@ Managing Dell PowerFlex storage pool
State of the storage pool. - password + gateway_host str True -
The password of the PowerFlex gateway host. - - - storage_pool_name - str - - - -
The name of the storage pool.
If more than one storage pool is found with the same name then protection domain id/name is required to perform the task.
Mutually exclusive with storage_pool_id. +
IP or FQDN of the PowerFlex gateway host. username @@ -4128,12 +4319,12 @@ Managing Dell PowerFlex storage pool
The username of the PowerFlex gateway host. - storage_pool_id + password str + True - -
The id of the storage pool.
It is auto generated, hence should not be provided during creation of a storage pool.
Mutually exclusive with storage_pool_name. +
The password of the PowerFlex gateway host. verifycert @@ -4143,7 +4334,23 @@ Managing Dell PowerFlex storage pool
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified. - + + port + int + + 443 + +
Port number through which communication happens with PowerFlex gateway host. + + + timeout + int + + 120 + +
Time after which connection will get terminated.
It is to be mentioned in seconds. + + ### Notes * TRANSITIONAL media type is supported only during modification. @@ -4206,76 +4413,107 @@ Managing Dell PowerFlex storage pool ``` ### Return Values - + - + - - + + - + - + - - + + - + - + - + - - + + - + - + - + - - + + - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - + + - + -
KeyKey Type Returned Description
changed
changed bool always Whether or not the resource has changed.
storage_pool_details storage_pool_details complex When storage pool exists Details of the storage pool.
  id id str success ID of the storage pool under protection domain.
  useRfcache bool mediaType str success Enable/Disable RFcache on a specific storage pool. Type of devices in the storage pool.
  protectionDomainName name str success Name of the protection domain in which pool resides. Name of the storage pool under protection domain.
  useRmcache bool protectionDomainId str success Enable/Disable RMcache on a specific storage pool. ID of the protection domain in which pool resides.
  name protectionDomainName str success Name of the storage pool under protection domain. Name of the protection domain in which pool resides.
  protectionDomainId str statistics complex success ID of the protection domain in which pool resides. Statistics details of the storage pool.
   capacityInUseInKb str success Total capacity of the storage pool.
   deviceIds list success Device Ids of the storage pool.
   unusedCapacityInKb str success Unused capacity of the storage pool.
  useRfcache bool success Enable/Disable RFcache on a specific storage pool.
  mediaType str useRmcache bool success Type of devices in the storage pool. Enable/Disable RMcache on a specific storage pool.
+ ### Authors * Arindam Datta (@dattaarindam) @@ -4301,37 +4539,37 @@ Manage volumes on Dell PowerFlex Choices Description - - size - int + + vol_name + str -
The size of the volume.
Size of the volume will be assigned as higher multiple of 8 GB. +
The name of the volume.
Mandatory for create operation.
It is unique across the PowerFlex array.
Mutually exclusive with vol_id. - timeout - int + vol_id + str - 120 -
Time after which connection will get terminated.
It is to be mentioned in seconds. + +
The ID of the volume.
Except create operation, all other operations can be performed using vol_id.
Mutually exclusive with vol_name. - auto_snap_remove_type + storage_pool_name str -
  • remove
  • detach
-
Whether to remove or detach the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type.
If the snapshot policy name/id is passed empty then auto_snap_remove_type is defaulted to 'detach'. + +
The name of the storage pool.
Either name or the id of the storage pool is required for creating a volume.
During creation, if storage pool name is provided then either protection domain name or id must be mentioned along with it.
Mutually exclusive with storage_pool_id. - vol_name + storage_pool_id str -
The name of the volume.
Mandatory for create operation.
It is unique across the PowerFlex array.
Mutually exclusive with vol_id. +
The ID of the storage pool.
Either name or the id of the storage pool is required for creating a volume.
Mutually exclusive with storage_pool_name. protection_domain_name @@ -4342,28 +4580,92 @@ Manage volumes on Dell PowerFlex
The name of the protection domain.
During creation of a volume, if more than one storage pool exists with the same name then either protection domain name or id must be mentioned along with it.
Mutually exclusive with protection_domain_id. - vol_id + protection_domain_id str -
The ID of the volume.
Except create operation, all other operations can be performed using vol_id.
Mutually exclusive with vol_name.
The ID of the protection domain.
During creation of a volume, if more than one storage pool exists with the same name then either protection domain name or id must be mentioned along with it.
Mutually exclusive with protection_domain_name. - port + vol_type + str + + +
  • THICK_PROVISIONED
  • THIN_PROVISIONED
+
Type of volume provisioning. + + + compression_type + str + + +
  • NORMAL
  • NONE
+
Type of the compression method. + + + use_rmcache + bool + + + +
Whether to use RM Cache or not. + + + snapshot_policy_name + str + + + +
Name of the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type. + + + snapshot_policy_id + str + + + +
ID of the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type. + + + auto_snap_remove_type + str + + +
  • remove
  • detach
+
Whether to remove or detach the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type.
If the snapshot policy name/id is passed empty then auto_snap_remove_type is defaulted to 'detach'. + + + size int - 443 -
Port number through which communication happens with PowerFlex gateway host. + +
The size of the volume.
Size of the volume will be assigned as higher multiple of 8 GB. - vol_type + cap_unit str -
  • THICK_PROVISIONED
  • THIN_PROVISIONED
-
Type of volume provisioning. +
  • GB
  • TB
+
The unit of the volume size. It defaults to 'GB'. + + + vol_new_name + str + + + +
New name of the volume. Used to rename the volume. + + + allow_multiple_mappings + bool + + + +
Specifies whether to allow or not allow multiple mappings.
If the volume is mapped to one SDC then for every new mapping allow_multiple_mappings has to be passed as True. sdc @@ -4375,39 +4677,30 @@ Manage volumes on Dell PowerFlex   - sdc_ip - str - - - -
IP of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_id and sdc_ip. - - -   - sdc_id + sdc_name str -
ID of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_name and sdc_ip. +
Name of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_id and sdc_ip.   - sdc_name + sdc_id str -
Name of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_id and sdc_ip. +
ID of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_name and sdc_ip.   - iops_limit - int + sdc_ip + str -
Limit of volume IOPS.
Minimum IOPS limit is 11 and specify 0 for unlimited iops. +
IP of the SDC.
Specify either sdc_name, sdc_id or sdc_ip.
Mutually exclusive with sdc_id and sdc_ip.   @@ -4427,31 +4720,16 @@ Manage volumes on Dell PowerFlex
Limit of volume network bandwidth.
Need to mention in multiple of 1024 Kbps.
To set no limit, 0 is to be passed. - - protection_domain_id - str - - - -
The ID of the protection domain.
During creation of a volume, if more than one storage pool exists with the same name then either protection domain name or id must be mentioned along with it.
Mutually exclusive with protection_domain_name. - - - verifycert - bool - - True - -
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified. - - - allow_multiple_mappings - bool - - - -
Specifies whether to allow multiple mappings or not.
If the volume is mapped to one SDC then for every new mapping allow_multiple_mappings has to be passed as True. - +   + iops_limit + int + + + +
Limit of volume IOPS.
Minimum IOPS limit is 11 and specify 0 for unlimited iops. + + sdc_state str @@ -4460,36 +4738,20 @@ Manage volumes on Dell PowerFlex
Mapping state of the SDC. - snapshot_policy_name - str - - + delete_snapshots + bool -
Name of the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type. - - - compression_type - str -
  • NORMAL
  • NONE
-
Type of the compression method. +
If True, the volume and all its dependent snapshots will be deleted.
If False, only the volume will be deleted.
It can be specified only when the state is absent.
It defaults to False, if not specified. - password + state str True - -
The password of the PowerFlex gateway host. - - - snapshot_policy_id - str - - - -
ID of the snapshot policy.
To remove/detach snapshot policy, empty snapshot_policy_id/snapshot_policy_name is to be passed along with auto_snap_remove_type. +
  • present
  • absent
+
State of the volume. gateway_host @@ -4500,70 +4762,46 @@ Manage volumes on Dell PowerFlex
IP or FQDN of the PowerFlex gateway host. - use_rmcache - bool - - - -
Whether to use RM Cache or not. - - - cap_unit + username str + True -
  • GB
  • TB
-
The unit of the volume size. It defaults to 'GB'. - - - delete_snapshots - bool - - - -
If True, the volume and all its dependent snapshots will be deleted.
If False, only the volume will be deleted.
It can be specified only when the state is absent.
It defaults to False, if not specified. +
The username of the PowerFlex gateway host. - state + password str True -
  • present
  • absent
-
State of the volume. - - - storage_pool_name - str - - -
The name of the storage pool.
Either name or the id of the storage pool is required for creating a volume.
During creation, if storage pool name is provided then either protection domain name or id must be mentioned along with it.
Mutually exclusive with storage_pool_id. +
The password of the PowerFlex gateway host. - vol_new_name - str - + verifycert + bool + True -
New name of the volume. Used to rename the volume. +
Boolean variable to specify whether or not to validate SSL certificate.
True - Indicates that the SSL certificate should be verified.
False - Indicates that the SSL certificate should not be verified. - storage_pool_id - str - + port + int + 443 -
The ID of the storage pool.
Either name or the id of the storage pool is required for creating a volume.
Mutually exclusive with storage_pool_name. +
Port number through which communication happens with PowerFlex gateway host. - username - str - True + timeout + int + 120 -
The username of the PowerFlex gateway host. +
Time after which connection will get terminated.
It is to be mentioned in seconds. - + ### Notes * The check_mode is not supported. @@ -4682,51 +4920,36 @@ Manage volumes on Dell PowerFlex ``` ### Return Values - + - + - - + + + + + + + + - - - - - - - - - - - - - - - + - - - - - - - - + - + @@ -4734,107 +4957,145 @@ Manage volumes on Dell PowerFlex - - + + - + - - + + - + - - + + - + - + - + - - + + - + - + - + - + - + - + + + + + + + + - + + + + + + + + + + + + + + + - + - + - + - + - + - - + + - + + + + + + + + + + + + + + + + + + + + + + + + - + - + - - - - - - -
KeyKey Type Returned Description
volume_details
changed bool always Whether or not the resource has changed.
volume_details complex When volume exists Details of the volume.
  snapshotPolicyId str success ID of the snapshot policy associated with volume.
  snapshotsList str success List of snapshots associated with the volume.
  snapshotPolicyName id str success Name of the snapshot policy associated with volume.
  sizeInGb int success Size of the volume in Gb. The ID of the volume.
  mappedSdcInfo mappedSdcInfo complex success The details of the mapped SDC.
    limitBwInMbps int accessMode str success Bandwidth limit for the SDC. mapping access mode for the specified volume.
    accessMode str limitBwInMbps int success mapping access mode for the specified volume. Bandwidth limit for the SDC.
    sdcName str limitIops int success Name of the SDC. IOPS limit for the SDC.
    sdcIp sdcId str success IP of the SDC. ID of the SDC.
    limitIops int sdcIp str success IOPS limit for the SDC. IP of the SDC.
    sdcId sdcName str success ID of the SDC. Name of the SDC.
  id name str success The ID of the volume. Name of the volume.
  protectionDomainName protectionDomainId str success ID of the protection domain in which volume resides.
  protectionDomainName str success Name of the protection domain in which volume resides.
  storagePoolName sizeInGb int success Size of the volume in Gb.
  sizeInKb int success Size of the volume in Kb.
  snapshotPolicyId str success Name of the storage pool in which volume resides. ID of the snapshot policy associated with volume.
  storagePoolId snapshotPolicyName str success ID of the storage pool in which volume resides. Name of the snapshot policy associated with volume.
  protectionDomainId snapshotsList str success ID of the protection domain in which volume resides. List of snapshots associated with the volume.
  sizeInKb int statistics complex success Size of the volume in Kb. Statistics details of the volume.
   numOfChildVolumes int success Number of child volumes.
   numOfMappedSdcs int success Number of mapped Sdcs of the volume.
  storagePoolId str success ID of the storage pool in which volume resides.
  name storagePoolName str success Name of the volume. Name of the storage pool in which volume resides.
changed bool always Whether or not the resource has changed.
+ ### Authors * P Srinivas Rao (@srinivas-rao5) diff --git a/docs/Release Notes.md b/docs/Release Notes.md index a5a345d..5fcf94c 100644 --- a/docs/Release Notes.md +++ b/docs/Release Notes.md @@ -1,6 +1,6 @@ **Ansible Modules for Dell Technologies PowerFlex** ========================================= -### Release Notes 1.3.0 +### Release notes 1.4.0 > © 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell > and other trademarks are trademarks of Dell Inc. or its @@ -20,7 +20,7 @@ Modules for Dell Technologies (Dell) PowerFlex. - [Distribution](#distribution) - [Documentation](#documentation) -Revision History +Revision history ---------------- The table in this section lists the revision history of this document. @@ -28,9 +28,9 @@ Table 1. Revision history | Revision | Date | Description | |----------|-----------|-------------------------------------------------------------| -| 01 | June 2022 | Current release of Ansible Modules for Dell PowerFlex 1.3.0 | +| 01 | Sep 2022 | Current release of Ansible Modules for Dell PowerFlex 1.4.0 | -Product Description +Product description ------------------- The Ansible modules for Dell PowerFlex are used to automate and orchestrate @@ -41,20 +41,13 @@ cluster, and obtaining high-level information about a PowerFlex system informati The modules use playbooks to list, show, create, delete, and modify each of the entities. -New Features and enhancements +New features and enhancements ----------------------------- Along with the previous release deliverables, this release supports following features - -- The Product Guide, Release Notes and ReadMe have been updated to adhere to the guidelines by the ansible community. -- MDM cluster module supports following functionalities: - * Get MDM cluster details. - * Add a standby MDM. - * Remove a standby MDM. - * Modify attributes of an MDM. - * Change ownership of MDM cluster. - * Switch MDM cluster mode. - * Modify performance profile of MDM cluster. 
-- Enabled the check_mode support for info module. -- Added execution environment manifest file to support building an execution environment with ansible-builder. +- Info module is enhanced to support the listing volumes and storage pools with statistics data​. +- Storage pool module is enhanced to get the details with statistics data​. +- Volume module is enhanced to get the details with statistics data​. +- Added support for the 4.0.x release of PowerFlex OS. Known issues ------------ @@ -67,11 +60,11 @@ Limitations Distribution ------------ The software package is available for download from the [Ansible Modules -for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.3.0) page. +for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.4.0) page. Documentation ------------- -The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.3.0/docs) +The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.4.0/docs) page. It includes the following: - README diff --git a/docs/SECURITY.md b/docs/SECURITY.md new file mode 100644 index 0000000..05d3221 --- /dev/null +++ b/docs/SECURITY.md @@ -0,0 +1,22 @@ + + +# Security policy + +The Ansible modules for Dell PowerFlex repository are inspected for security vulnerabilities via blackduck scans and static code analysis. + +In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-powerflex/blob/1.4.0/docs/CONTRIBUTING.md#Pull-requests) for more information. + +## Reporting a vulnerability + +Have you discovered a security vulnerability in this project? +We ask you to alert the maintainers by sending an email, describing the issue, impact, and fix - if applicable. 
+ +You can reach the Ansible modules for Dell PowerFlex maintainers at ansible.team@dell.com. diff --git a/docs/SUPPORT.md b/docs/SUPPORT.md new file mode 100644 index 0000000..26e6f15 --- /dev/null +++ b/docs/SUPPORT.md @@ -0,0 +1,12 @@ + + +## Support +For all your support needs you can interact with us on [GitHub](https://github.com/dell/ansible-powerflex) by creating a [GitHub Issue](https://github.com/dell/ansible-powerflex/issues) or through the [Ansible Community](https://www.dell.com/community/Automation/bd-p/Automation). diff --git a/galaxy.yml b/galaxy.yml index ecb3868..fb17554 100644 --- a/galaxy.yml +++ b/galaxy.yml @@ -9,7 +9,7 @@ namespace: dellemc name: powerflex # The version of the collection. Must be compatible with semantic versioning -version: 1.3.0 +version: 1.4.0 # The path to the Markdown (.md) readme file. This path is relative to the root of the collection readme: README.md @@ -22,6 +22,7 @@ authors: - P Srinivas Rao - Rajshree Khare - Bhavneet Sharma +- Ananthu S Kuttattu ### OPTIONAL but strongly recommended # A short summary description of the collection @@ -48,13 +49,13 @@ tags: [storage] dependencies: {} # The URL of the originating SCM repository -repository: https://github.com/dell/ansible-powerflex/tree/1.3.0 +repository: https://github.com/dell/ansible-powerflex/tree/1.4.0 # The URL to any online docs -documentation: https://github.com/dell/ansible-powerflex/tree/1.3.0/docs +documentation: https://github.com/dell/ansible-powerflex/tree/1.4.0/docs # The URL to the homepage of the collection/project -homepage: https://github.com/dell/ansible-powerflex/tree/1.3.0 +homepage: https://github.com/dell/ansible-powerflex/tree/1.4.0 # The URL to the collection issue tracker issues: https://www.dell.com/community/Automation/bd-p/Automation diff --git a/meta/execution-environment.yml b/meta/execution-environment.yml index e60372e..d2c0a3e 100644 --- a/meta/execution-environment.yml +++ b/meta/execution-environment.yml @@ -1,5 +1,5 @@ --- 
version: 1 dependencies: - galaxy: ../requirements.yml #Absolute/relative path of requirements.yml - python: ../requirements.txt #Absolute/relative path of requirements.txt + galaxy: requirements.yml #Absolute/relative path of requirements.yml + python: requirements.txt #Absolute/relative path of requirements.txt diff --git a/plugins/module_utils/storage/dell/utils.py b/plugins/module_utils/storage/dell/utils.py index bfa770c..051ae0c 100644 --- a/plugins/module_utils/storage/dell/utils.py +++ b/plugins/module_utils/storage/dell/utils.py @@ -134,14 +134,14 @@ def pypowerflex_version_check(): missing_packages += 'pkg_resources, ' if not HAS_POWERFLEX_SDK: - missing_packages += 'PyPowerFlex V 1.4.0 or above' + missing_packages += 'PyPowerFlex V 1.5.0 or above' else: - min_ver = '1.4.0' + min_ver = '1.5.0' curr_version = pkg_resources.require("PyPowerFlex")[0].version supported_version = parse_version(curr_version) >= parse_version( min_ver) if not supported_version: - missing_packages += 'PyPowerFlex V 1.4.0 or above' + missing_packages += 'PyPowerFlex V 1.5.0 or above' missing_packages_check = dict( dependency_present=False if missing_packages else True, diff --git a/plugins/modules/info.py b/plugins/modules/info.py index 468fa4d..2b2bab5 100644 --- a/plugins/modules/info.py +++ b/plugins/modules/info.py @@ -137,7 +137,7 @@ description: Defragmentation enabled. type: bool enterpriseFeaturesEnabled: - description: Enterprise eatures enabled. + description: Enterprise features enabled. type: bool id: description: The ID of the system. @@ -370,20 +370,409 @@ returned: always type: list contains: + mediaType: + description: Type of devices in the storage pool. + type: str + useRfcache: + description: Enable/Disable RFcache on a specific storage pool. + type: bool + useRmcache: + description: Enable/Disable RMcache on a specific storage pool. + type: bool id: - description: storage pool id. + description: ID of the storage pool under protection domain. 
type: str name: - description: storage pool name. + description: Name of the storage pool under protection domain. type: str + protectionDomainId: + description: ID of the protection domain in which pool resides. + type: str + protectionDomainName: + description: Name of the protection domain in which pool resides. + type: str + "statistics": + description: Statistics details of the storage pool. + type: complex + contains: + "capacityInUseInKb": + description: Total capacity of the storage pool. + type: str + "unusedCapacityInKb": + description: Unused capacity of the storage pool. + type: str + "deviceIds": + description: Device Ids of the storage pool. + type: list sample: [ { + "addressSpaceUsage": "Normal", + "addressSpaceUsageType": "DeviceCapacityLimit", + "backgroundScannerBWLimitKBps": 3072, + "backgroundScannerMode": "DataComparison", + "bgScannerCompareErrorAction": "ReportAndFix", + "bgScannerReadErrorAction": "ReportAndFix", + "capacityAlertCriticalThreshold": 90, + "capacityAlertHighThreshold": 80, + "capacityUsageState": "Normal", + "capacityUsageType": "NetCapacity", + "checksumEnabled": false, + "compressionMethod": "Invalid", + "dataLayout": "MediumGranularity", + "externalAccelerationType": "None", + "fglAccpId": null, + "fglExtraCapacity": null, + "fglMaxCompressionRatio": null, + "fglMetadataSizeXx100": null, + "fglNvdimmMetadataAmortizationX100": null, + "fglNvdimmWriteCacheSizeInMb": null, + "fglOverProvisioningFactor": null, + "fglPerfProfile": null, + "fglWriteAtomicitySize": null, + "fragmentationEnabled": true, "id": "e0d8f6c900000000", - "name": "pool1" - }, - { - "id": "e0d96c1f00000002", - "name": "pool1" + "links": [ + { + "href": "/api/instances/StoragePool::e0d8f6c900000000", + "rel": "self" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Statistics", + "rel": "/api/StoragePool/relationship/Statistics" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/SpSds", + "rel": 
"/api/StoragePool/relationship/SpSds" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Volume", + "rel": "/api/StoragePool/relationship/Volume" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/Device", + "rel": "/api/StoragePool/relationship/Device" + }, + { + "href": "/api/instances/StoragePool::e0d8f6c900000000 + /relationships/VTree", + "rel": "/api/StoragePool/relationship/VTree" + }, + { + "href": "/api/instances/ProtectionDomain::9300c1f900000000", + "rel": "/api/parent/relationship/protectionDomainId" + } + ], + "statistics": { + "BackgroundScannedInMB": 3466920, + "activeBckRebuildCapacityInKb": 0, + "activeEnterProtectedMaintenanceModeCapacityInKb": 0, + "aggregateCompressionLevel": "Uncompressed", + "atRestCapacityInKb": 1248256, + "backgroundScanCompareErrorCount": 0, + "backgroundScanFixedCompareErrorCount": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 369098752, + "capacityInUseInKb": 2496512, + "capacityInUseNoOverheadInKb": 2496512, + "capacityLimitInKb": 845783040, + "compressedDataCompressionRatio": 0.0, + "compressionRatio": 1.0, + "currentFglMigrationSizeInKb": 0, + "deviceIds": [ + ], + "enterProtectedMaintenanceModeCapacityInKb": 0, + "enterProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "enterProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exposedCapacityInKb": 0, + "failedCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + 
"totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 184549376, + "inaccessibleCapacityInKb": 0, + "logWrittenBlocksInKb": 0, + "maxCapacityInKb": 845783040, + "migratingVolumeIds": [ + ], + "migratingVtreeIds": [ + ], + "movingCapacityInKb": 0, + "netCapacityInUseInKb": 1248256, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDeviceAtFaultRebuilds": 0, + "numOfDevices": 3, + "numOfIncomingVtreeMigrations": 0, + "numOfVolumes": 8, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 8, + "overallUsageRatio": 73.92289, + "pendingBckRebuildCapacityInKb": 0, + "pendingEnterProtectedMaintenanceModeCapacityInKb": 0, + "pendingExitProtectedMaintenanceModeCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "persistentChecksumBuilderProgress": 100.0, + "persistentChecksumCapacityInKb": 414720, + "primaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 92274688, + "primaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "protectedCapacityInKb": 2496512, + "protectedVacInKb": 184549376, + "provisionedAddressesInKb": 2496512, + "rebalanceCapacityInKb": 0, + "rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + 
"rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rmPendingAllocatedInKb": 0, + "rmPendingThickInKb": 0, + "rplJournalCapAllowed": 0, + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 92274688, + "secondaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + "snapCapacityInUseInKb": 0, + "snapCapacityInUseOccupiedInKb": 0, + "snapshotCapacityInKb": 0, + "spSdsIds": [ + "abdfe71b00030001", + "abdce71d00040001", + "abdde71e00050001" + ], + "spareCapacityInKb": 84578304, + "targetOtherLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "tempCapacityInKb": 0, + "tempCapacityVacInKb": 0, + "thickCapacityInUseInKb": 0, + "thinAndSnapshotRatio": 73.92289, + "thinCapacityAllocatedInKm": 184549376, + "thinCapacityInUseInKb": 0, + "thinUserDataCapacityInKb": 2496512, + "totalFglMigrationSizeInKb": 0, + "totalReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "totalWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "trimmedUserDataCapacityInKb": 0, + "unreachableUnusedCapacityInKb": 0, + "unusedCapacityInKb": 758708224, + "userDataCapacityInKb": 2496512, + "userDataCapacityNoTrimInKb": 2496512, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + 
"numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volumeAddressSpaceInKb": 922XXXXX, + "volumeAllocationLimitInKb": 3707XXXXX, + "volumeIds": [ + "456afc7900XXXXXXXX" + ], + "vtreeAddresSpaceInKb": 92274688, + "vtreeIds": [ + "32b1681bXXXXXXXX", + ] + }, + "mediaType": "HDD", + "name": "pool1", + "numOfParallelRebuildRebalanceJobsPerDevice": 2, + "persistentChecksumBuilderLimitKb": 3072, + "persistentChecksumEnabled": true, + "persistentChecksumState": "Protected", + "persistentChecksumValidateOnRead": false, + "protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps": null, + "protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold": null, + "protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps": 10240, + "protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice": 1, + "protectedMaintenanceModeIoPriorityPolicy": "limitNumOfConcurrentIos", + "protectedMaintenanceModeIoPriorityQuietPeriodInMsec": null, + "protectionDomainId": "9300c1f900000000", + "protectionDomainName": "domain1", + "rebalanceEnabled": true, + "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null, + "rebalanceIoPriorityAppIopsPerDeviceThreshold": null, + "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebalanceIoPriorityPolicy": "favorAppIos", + "rebalanceIoPriorityQuietPeriodInMsec": null, + "rebuildEnabled": true, + "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": 
null, + "rebuildIoPriorityAppIopsPerDeviceThreshold": null, + "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240, + "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1, + "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos", + "rebuildIoPriorityQuietPeriodInMsec": null, + "replicationCapacityMaxRatio": 32, + "rmcacheWriteHandlingMode": "Cached", + "sparePercentage": 10, + "useRfcache": false, + "useRmcache": false, + "vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps": null, + "vtreeMigrationIoPriorityAppIopsPerDeviceThreshold": null, + "vtreeMigrationIoPriorityBwLimitPerDeviceInKbps": 10240, + "vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice": 1, + "vtreeMigrationIoPriorityPolicy": "favorAppIos", + "vtreeMigrationIoPriorityQuietPeriodInMsec": null, + "zeroPaddingEnabled": true } ] Volumes: @@ -392,15 +781,163 @@ type: list contains: id: - description: volume id. + description: The ID of the volume. type: str + mappedSdcInfo: + description: The details of the mapped SDC. + type: complex + contains: + sdcId: + description: ID of the SDC. + type: str + sdcName: + description: Name of the SDC. + type: str + sdcIp: + description: IP of the SDC. + type: str + accessMode: + description: mapping access mode for the specified volume. + type: str + limitIops: + description: IOPS limit for the SDC. + type: int + limitBwInMbps: + description: Bandwidth limit for the SDC. + type: int name: - description: volume name. + description: Name of the volume. + type: str + sizeInKb: + description: Size of the volume in Kb. + type: int + sizeInGb: + description: Size of the volume in Gb. + type: int + storagePoolId: + description: ID of the storage pool in which volume resides. + type: str + storagePoolName: + description: Name of the storage pool in which volume resides. + type: str + protectionDomainId: + description: ID of the protection domain in which volume resides. type: str + protectionDomainName: + description: Name of the protection domain in which volume resides. 
+ type: str + snapshotPolicyId: + description: ID of the snapshot policy associated with volume. + type: str + snapshotPolicyName: + description: Name of the snapshot policy associated with volume. + type: str + snapshotsList: + description: List of snapshots associated with the volume. + type: str + "statistics": + description: Statistics details of the storage pool. + type: complex + contains: + "numOfChildVolumes": + description: Number of child volumes. + type: int + "numOfMappedSdcs": + description: Number of mapped Sdcs of the volume. + type: int sample: [ { - "id": "cdd883cf00000002", - "name": "ansible-volume-1" + "accessModeLimit": "ReadWrite", + "ancestorVolumeId": null, + "autoSnapshotGroupId": null, + "compressionMethod": "Invalid", + "consistencyGroupId": null, + "creationTime": 1661234220, + "dataLayout": "MediumGranularity", + "id": "456afd7XXXXXXX", + "lockedAutoSnapshot": false, + "lockedAutoSnapshotMarkedForRemoval": false, + "managedBy": "ScaleIO", + "mappedSdcInfo": [ + { + "accessMode": "ReadWrite", + "isDirectBufferMapping": false, + "limitBwInMbps": 0, + "limitIops": 0, + "sdcId": "c42425cbXXXXX", + "sdcIp": "10.XXX.XX.XX", + "sdcName": null + } + ], + "name": "vol-1", + "notGenuineSnapshot": false, + "originalExpiryTime": 0, + "pairIds": null, + "replicationJournalVolume": false, + "replicationTimeStamp": 0, + "retentionLevels": [ + ], + "secureSnapshotExpTime": 0, + "sizeInKb": 8388608, + "snplIdOfAutoSnapshot": null, + "snplIdOfSourceVolume": null, + "statistics": { + "childVolumeIds": [ + ], + "descendantVolumeIds": [ + ], + "initiatorSdcId": null, + "mappedSdcIds": [ + "c42425XXXXXX" + ], + "numOfChildVolumes": 0, + "numOfDescendantVolumes": 0, + "numOfMappedSdcs": 1, + "registrationKey": null, + "registrationKeys": [ + ], + "replicationJournalVolume": false, + "replicationState": "UnmarkedForReplication", + "reservationType": "NotReserved", + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "userDataReadBwc": { + "numOccured": 0, + 
"numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + } + }, + "storagePoolId": "7630a248XXXXXXX", + "timeStampIsAccurate": false, + "useRmcache": false, + "volumeReplicationState": "UnmarkedForReplication", + "volumeType": "ThinProvisioned", + "vtreeId": "32b168bXXXXXX" } ] Devices: @@ -586,6 +1123,12 @@ def get_storage_pool_list(self, filter_dict=None): pool = self.powerflex_conn.storage_pool.get(filter_fields=filter_dict) else: pool = self.powerflex_conn.storage_pool.get() + + if pool: + statistics_map = self.powerflex_conn.utility.get_statistics_for_all_storagepools() + list_of_pool_ids_in_statistics = statistics_map.keys() + for item in pool: + item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_pool_ids_in_statistics else {} return result_list(pool) except Exception as e: @@ -604,6 +1147,12 @@ def get_volumes_list(self, filter_dict=None): volumes = self.powerflex_conn.volume.get(filter_fields=filter_dict) else: volumes = self.powerflex_conn.volume.get() + + if volumes: + statistics_map = self.powerflex_conn.utility.get_statistics_for_all_volumes() + list_of_vol_ids_in_statistics = statistics_map.keys() + for item in volumes: + item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_vol_ids_in_statistics else {} return result_list(volumes) except Exception as e: @@ -756,12 +1305,7 @@ def result_list(entity): LOG.info('Successfully listed.') for item in entity: if item['name']: - result.append( - { - "name": item['name'], - "id": item['id'] - } - ) + result.append(item) else: 
result.append({"id": item['id']}) return result diff --git a/plugins/modules/storagepool.py b/plugins/modules/storagepool.py index 957565d..bd7665b 100644 --- a/plugins/modules/storagepool.py +++ b/plugins/modules/storagepool.py @@ -174,6 +174,19 @@ protectionDomainName: description: Name of the protection domain in which pool resides. type: str + "statistics": + description: Statistics details of the storage pool. + type: complex + contains: + "capacityInUseInKb": + description: Total capacity of the storage pool. + type: str + "unusedCapacityInKb": + description: Unused capacity of the storage pool. + type: str + "deviceIds": + description: Device Ids of the storage pool. + type: list sample: { "addressSpaceUsage": "Normal", "addressSpaceUsageType": "DeviceCapacityLimit", @@ -235,6 +248,272 @@ "rel": "/api/parent/relationship/protectionDomainId" } ], + "statistics": { + "BackgroundScannedInMB": 3466920, + "activeBckRebuildCapacityInKb": 0, + "activeEnterProtectedMaintenanceModeCapacityInKb": 0, + "aggregateCompressionLevel": "Uncompressed", + "atRestCapacityInKb": 1248256, + "backgroundScanCompareErrorCount": 0, + "backgroundScanFixedCompareErrorCount": 0, + "bckRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "bckRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "capacityAvailableForVolumeAllocationInKb": 369098752, + "capacityInUseInKb": 2496512, + "capacityInUseNoOverheadInKb": 2496512, + "capacityLimitInKb": 845783040, + "compressedDataCompressionRatio": 0.0, + "compressionRatio": 1.0, + "currentFglMigrationSizeInKb": 0, + "deviceIds": [ + ], + "enterProtectedMaintenanceModeCapacityInKb": 0, + "enterProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "enterProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeReadBwc": { + "numOccured": 0, + "numSeconds": 
0, + "totalWeightInKb": 0 + }, + "exitProtectedMaintenanceModeWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "exposedCapacityInKb": 0, + "failedCapacityInKb": 0, + "fwdRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "fwdRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "inMaintenanceCapacityInKb": 0, + "inMaintenanceVacInKb": 0, + "inUseVacInKb": 184549376, + "inaccessibleCapacityInKb": 0, + "logWrittenBlocksInKb": 0, + "maxCapacityInKb": 845783040, + "migratingVolumeIds": [ + ], + "migratingVtreeIds": [ + ], + "movingCapacityInKb": 0, + "netCapacityInUseInKb": 1248256, + "normRebuildCapacityInKb": 0, + "normRebuildReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "normRebuildWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "numOfDeviceAtFaultRebuilds": 0, + "numOfDevices": 3, + "numOfIncomingVtreeMigrations": 0, + "numOfVolumes": 8, + "numOfVolumesInDeletion": 0, + "numOfVtrees": 8, + "overallUsageRatio": 73.92289, + "pendingBckRebuildCapacityInKb": 0, + "pendingEnterProtectedMaintenanceModeCapacityInKb": 0, + "pendingExitProtectedMaintenanceModeCapacityInKb": 0, + "pendingFwdRebuildCapacityInKb": 0, + "pendingMovingCapacityInKb": 0, + "pendingMovingInBckRebuildJobs": 0, + "persistentChecksumBuilderProgress": 100.0, + "persistentChecksumCapacityInKb": 414720, + "primaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "primaryVacInKb": 92274688, + "primaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "protectedCapacityInKb": 2496512, + "protectedVacInKb": 184549376, + "provisionedAddressesInKb": 2496512, + "rebalanceCapacityInKb": 0, + 
"rebalanceReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rebalanceWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "rfacheReadHit": 0, + "rfacheWriteHit": 0, + "rfcacheAvgReadTime": 0, + "rfcacheAvgWriteTime": 0, + "rfcacheIoErrors": 0, + "rfcacheIosOutstanding": 0, + "rfcacheIosSkipped": 0, + "rfcacheReadMiss": 0, + "rmPendingAllocatedInKb": 0, + "rmPendingThickInKb": 0, + "rplJournalCapAllowed": 0, + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "secondaryReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromDevBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryReadFromRmcacheBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "secondaryVacInKb": 92274688, + "secondaryWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "semiProtectedCapacityInKb": 0, + "semiProtectedVacInKb": 0, + "snapCapacityInUseInKb": 0, + "snapCapacityInUseOccupiedInKb": 0, + "snapshotCapacityInKb": 0, + "spSdsIds": [ + "abdfe71b00030001", + "abdce71d00040001", + "abdde71e00050001" + ], + "spareCapacityInKb": 84578304, + "targetOtherLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "targetWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "tempCapacityInKb": 0, + "tempCapacityVacInKb": 0, + "thickCapacityInUseInKb": 0, + "thinAndSnapshotRatio": 73.92289, + "thinCapacityAllocatedInKm": 184549376, + "thinCapacityInUseInKb": 0, + "thinUserDataCapacityInKb": 2496512, + "totalFglMigrationSizeInKb": 0, + "totalReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "totalWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "trimmedUserDataCapacityInKb": 0, + "unreachableUnusedCapacityInKb": 0, 
+ "unusedCapacityInKb": 758708224, + "userDataCapacityInKb": 2496512, + "userDataCapacityNoTrimInKb": 2496512, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volMigrationWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "volumeAddressSpaceInKb": 922XXXXX, + "volumeAllocationLimitInKb": 3707XXXXX, + "volumeIds": [ + "456afc7900XXXXXXXX" + ], + "vtreeAddresSpaceInKb": 92274688, + "vtreeIds": [ + "32b1681bXXXXXXXX", + ] + }, "mediaType": "HDD", "name": "pool1", "numOfParallelRebuildRebalanceJobsPerDevice": 2, @@ -387,6 +666,8 @@ def get_storage_pool(self, storage_pool_id=None, storage_pool_name=None, self.module.fail_json(msg=err_msg) elif len(pool_details) == 1: pool_details = pool_details[0] + statistics = self.powerflex_conn.storage_pool.get_statistics(pool_details['id']) + pool_details['statistics'] = statistics if statistics else {} pd_id = pool_details['protectionDomainId'] pd_name = self.get_protection_domain( protection_domain_id=pd_id)['name'] diff --git a/plugins/modules/volume.py b/plugins/modules/volume.py index 60acced..bf8d08a 100644 --- a/plugins/modules/volume.py +++ b/plugins/modules/volume.py @@ -35,7 +35,7 @@ - The ID of the volume. - Except create operation, all other operations can be performed using vol_id. - - Mutually exclusive with vol_id. + - Mutually exclusive with vol_name. 
type: str storage_pool_name: description: @@ -123,7 +123,7 @@ type: str allow_multiple_mappings: description: - - Specifies whether to allow multiple mappings or not. + - Specifies whether to allow or not allow multiple mappings. - If the volume is mapped to one SDC then for every new mapping allow_multiple_mappings has to be passed as True. type: bool @@ -366,6 +366,16 @@ snapshotsList: description: List of snapshots associated with the volume. type: str + "statistics": + description: Statistics details of the storage pool. + type: complex + contains: + "numOfChildVolumes": + description: Number of child volumes. + type: int + "numOfMappedSdcs": + description: Number of mapped Sdcs of the volume. + type: int sample: { "accessModeLimit": "ReadWrite", "ancestorVolumeId": null, @@ -468,6 +478,57 @@ "vtreeId": "6e86255c00000001" } ], + "statistics": { + "childVolumeIds": [ + ], + "descendantVolumeIds": [ + ], + "initiatorSdcId": null, + "mappedSdcIds": [ + "c42425XXXXXX" + ], + "numOfChildVolumes": 0, + "numOfDescendantVolumes": 0, + "numOfMappedSdcs": 1, + "registrationKey": null, + "registrationKeys": [ + ], + "replicationJournalVolume": false, + "replicationState": "UnmarkedForReplication", + "reservationType": "NotReserved", + "rplTotalJournalCap": 0, + "rplUsedJournalCap": 0, + "userDataReadBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcReadLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcTrimLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataSdcWriteLatency": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataTrimBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + }, + "userDataWriteBwc": { + "numOccured": 0, + "numSeconds": 0, + "totalWeightInKb": 0 + } + }, "snplIdOfAutoSnapshot": null, "snplIdOfSourceVolume": null, "storagePoolId": "e0d8f6c900000000", @@ -1419,6 +1480,10 @@ def 
show_output(self, vol_id): filter_fields={'ancestorVolumeId': volume_details[0]['id']}) volume_details[0]['snapshotsList'] = list_of_snaps + # Append statistics + statistics = self.powerflex_conn.volume.get_statistics(volume_details[0]['id']) + volume_details[0]['statistics'] = statistics if statistics else {} + return volume_details[0] except Exception as e: diff --git a/tests/unit/plugins/module_utils/mock_info_api.py b/tests/unit/plugins/module_utils/mock_info_api.py new file mode 100644 index 0000000..3cba1c8 --- /dev/null +++ b/tests/unit/plugins/module_utils/mock_info_api.py @@ -0,0 +1,229 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of info module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_volume_api import MockVolumeApi + + +__metaclass__ = type + + +class MockInfoApi: + INFO_COMMON_ARGS = { + "gateway_host": "**.***.**.***", + "gather_subset": [], + "filters": None + } + + DUMMY_IP = 'xx.xx.xx.xx' + INFO_ARRAY_DETAILS = [ + { + 'systemVersionName': 'DellEMC PowerFlex Version', + 'perfProfile': 'Compact', + 'authenticationMethod': 'Native', + 'capacityAlertHighThresholdPercent': 80, + 'capacityAlertCriticalThresholdPercent': 90, + 'upgradeState': 'NoUpgrade', + 'remoteReadOnlyLimitState': False, + 'mdmManagementPort': 6611, + 'mdmExternalPort': 7611, + 'sdcMdmNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 
'sdcSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 800, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 4000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 20000, + 'windowSizeInSec': 86400 + } + }, + 'sdcMemoryAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdcSocketAllocationFailuresCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdcLongOperationsCounterParameters': { + 'shortWindow': { + 'threshold': 10000, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 100000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 1000000, + 'windowSizeInSec': 86400 + } + }, + 'cliPasswordAllowed': True, + 'managementClientSecureCommunicationEnabled': True, + 'tlsVersion': 'TLSv1.2', + 'showGuid': True, + 'defragmentationEnabled': True, + 'mdmSecurityPolicy': 'None', + 'mdmCluster': { + 'clusterState': 'ClusteredNormal', + 'clusterMode': 'ThreeNodes', + 'slaves': [ + { + 'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': '', + 'virtualInterfaces': [ + '' + ], + 'opensslVersion': 'OpenSSL 26 Jan 2017', + 'role': 'Manager', + 'status': 'Normal', + 'name': 'test_node1_MDM', + 'id': 'test_id_1', + 'port': 0000 + } + ], + 'goodNodesNum': 3, + 'master': { + 'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': 'R3_6.0.0', + 'virtualInterfaces': [ + 'ens192' + ], + 'opensslVersion': 'OpenSSL26 Jan 2017', + 'role': 'Manager', + 'status': 'Normal', + 'name': 'test_node_0', + 'id': 'test_id_2', + 'port': 0000 + }, + 'tieBreakers': [ + { + 
'managementIPs': [ + DUMMY_IP + ], + 'ips': [ + DUMMY_IP + ], + 'versionInfo': '', + 'opensslVersion': 'N/A', + 'role': 'TieBreaker', + 'status': 'Normal', + 'id': 'test_id_3', + 'port': 0000 + } + ], + 'goodReplicasNum': 2, + 'id': '' + }, + 'sdcSdsConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'addressSpaceUsage': 'Normal', + 'lastUpgradeTime': 0, + 'sdcSdrConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'sdrSdsConnectivityInfo': { + 'clientServerConnectivityStatus': 'AllConnected', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'isInitialLicense': False, + 'capacityTimeLeftInDays': '253', + 'swid': 'abcdXXX', + 'installId': 'id_111', + 'restrictedSdcModeEnabled': False, + 'restrictedSdcMode': 'None', + 'enterpriseFeaturesEnabled': True, + 'daysInstalled': 112, + 'maxCapacityInGb': '5120', + 'id': 'id_222' + } + ] + + INFO_VOLUME_GET_LIST = MockVolumeApi.VOLUME_GET_LIST + + INFO_VOLUME_STATISTICS = { + 'test_vol_id_1': MockVolumeApi.VOLUME_STATISTICS + } + + INFO_STORAGE_POOL_GET_LIST = MockStoragePoolApi.STORAGE_POOL_GET_LIST + + INFO_STORAGE_POOL_STATISTICS = { + 'test_pool_id_1': MockStoragePoolApi.STORAGE_POOL_STATISTICS + } + + @staticmethod + def get_exception_response(response_type): + if response_type == 'volume_get_details': + return "Get volumes list from powerflex array failed with error " + elif response_type == 'sp_get_details': + return "Get storage pool list from powerflex array failed with error " diff --git 
a/tests/unit/plugins/module_utils/mock_storagepool_api.py b/tests/unit/plugins/module_utils/mock_storagepool_api.py new file mode 100644 index 0000000..60b274f --- /dev/null +++ b/tests/unit/plugins/module_utils/mock_storagepool_api.py @@ -0,0 +1,467 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of storage pool module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + + +class MockStoragePoolApi: + STORAGE_POOL_COMMON_ARGS = { + "gateway_host": "**.***.**.***", + "storage_pool_name": None, + "storage_pool_id": None, + "storage_pool_new_name": None, + "protection_domain_name": None, + "protection_domain_id": None, + "use_rmcache": None, + "use_rfcache": None, + "media_type": None, + 'state': None + } + + STORAGE_POOL_GET_LIST = [ + { + 'protectionDomainId': '4eeb304600000000', + 'rebuildEnabled': True, + 'dataLayout': 'MediumGranularity', + 'persistentChecksumState': 'Protected', + 'addressSpaceUsage': 'Normal', + 'externalAccelerationType': 'None', + 'rebalanceEnabled': True, + 'sparePercentage': 10, + 'rmcacheWriteHandlingMode': 'Cached', + 'checksumEnabled': False, + 'useRfcache': False, + 'compressionMethod': 'Invalid', + 'fragmentationEnabled': True, + 'numOfParallelRebuildRebalanceJobsPerDevice': 2, + 'capacityAlertHighThreshold': 80, + 'capacityAlertCriticalThreshold': 90, + 'capacityUsageState': 'Normal', + 'capacityUsageType': 'NetCapacity', + 'addressSpaceUsageType': 'DeviceCapacityLimit', + 'bgScannerCompareErrorAction': 'ReportAndFix', + 'bgScannerReadErrorAction': 'ReportAndFix', + 'fglExtraCapacity': None, + 'fglOverProvisioningFactor': None, + 'fglWriteAtomicitySize': None, + 'fglMaxCompressionRatio': None, + 'fglPerfProfile': None, + 'replicationCapacityMaxRatio': 0, + 'persistentChecksumEnabled': True, + 
'persistentChecksumBuilderLimitKb': 3072, + 'persistentChecksumValidateOnRead': False, + 'useRmcache': False, + 'fglAccpId': None, + 'rebuildIoPriorityPolicy': 'limitNumOfConcurrentIos', + 'rebalanceIoPriorityPolicy': 'favorAppIos', + 'vtreeMigrationIoPriorityPolicy': 'favorAppIos', + 'protectedMaintenanceModeIoPriorityPolicy': 'limitNumOfConcurrentIos', + 'rebuildIoPriorityNumOfConcurrentIosPerDevice': 1, + 'rebalanceIoPriorityNumOfConcurrentIosPerDevice': 1, + 'vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice': 1, + 'protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice': 1, + 'rebuildIoPriorityBwLimitPerDeviceInKbps': 10240, + 'rebalanceIoPriorityBwLimitPerDeviceInKbps': 10240, + 'vtreeMigrationIoPriorityBwLimitPerDeviceInKbps': 10240, + 'protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps': 10240, + 'rebuildIoPriorityAppIopsPerDeviceThreshold': None, + 'rebalanceIoPriorityAppIopsPerDeviceThreshold': None, + 'vtreeMigrationIoPriorityAppIopsPerDeviceThreshold': None, + 'protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold': None, + 'rebuildIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'rebalanceIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps': None, + 'rebuildIoPriorityQuietPeriodInMsec': None, + 'rebalanceIoPriorityQuietPeriodInMsec': None, + 'vtreeMigrationIoPriorityQuietPeriodInMsec': None, + 'protectedMaintenanceModeIoPriorityQuietPeriodInMsec': None, + 'zeroPaddingEnabled': True, + 'backgroundScannerMode': 'DataComparison', + 'backgroundScannerBWLimitKBps': 3072, + 'fglMetadataSizeXx100': None, + 'fglNvdimmWriteCacheSizeInMb': None, + 'fglNvdimmMetadataAmortizationX100': None, + 'mediaType': 'HDD', + 'name': 'test_pool', + 'id': 'test_pool_id_1' + } + ] + + STORAGE_POOL_STATISTICS = { + 'backgroundScanFixedReadErrorCount': 0, + 'pendingMovingOutBckRebuildJobs': 0, + 'degradedHealthyCapacityInKb': 0, + 
'activeMovingOutFwdRebuildJobs': 0, + 'bckRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglUncompressedDataSizeInKb': 0, + 'primaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'BackgroundScannedInMB': 3209584, + 'volumeIds': [ + 'test_vol_id_1' + ], + 'maxUserDataCapacityInKb': 761204736, + 'persistentChecksumBuilderProgress': 100.0, + 'rfcacheReadsSkippedAlignedSizeTooLarge': 0, + 'pendingMovingInRebalanceJobs': 0, + 'rfcacheWritesSkippedHeavyLoad': 0, + 'unusedCapacityInKb': 761204736, + 'userDataSdcReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'totalReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfDeviceAtFaultRebuilds': 0, + 'totalWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'persistentChecksumCapacityInKb': 414720, + 'rmPendingAllocatedInKb': 0, + 'numOfVolumes': 1, + 'rfcacheIosOutstanding': 0, + 'capacityAvailableForVolumeAllocationInKb': 377487360, + 'numOfMappedToAllVolumes': 0, + 'netThinUserDataCapacityInKb': 0, + 'backgroundScanFixedCompareErrorCount': 0, + 'volMigrationWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinAndSnapshotRatio': 'Infinity', + 'fglUserDataCapacityInKb': 0, + 'pendingMovingInEnterProtectedMaintenanceModeJobs': 0, + 'activeMovingInNormRebuildJobs': 0, + 'aggregateCompressionLevel': 'Uncompressed', + 'targetOtherLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netUserDataCapacityInKb': 0, + 'pendingMovingOutExitProtectedMaintenanceModeJobs': 0, + 'overallUsageRatio': 'Infinity', + 'volMigrationReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netCapacityInUseNoOverheadInKb': 0, + 'pendingMovingInBckRebuildJobs': 0, + 'rfcacheReadsSkippedInternalError': 0, + 'activeBckRebuildCapacityInKb': 0, + 'rebalanceCapacityInKb': 0, + 
'pendingMovingInExitProtectedMaintenanceModeJobs': 0, + 'rfcacheReadsSkippedLowResources': 0, + 'rplJournalCapAllowed': 0, + 'thinCapacityInUseInKb': 0, + 'userDataSdcTrimLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeMovingInEnterProtectedMaintenanceModeJobs': 0, + 'rfcacheWritesSkippedInternalError': 0, + 'netUserDataCapacityNoTrimInKb': 0, + 'rfcacheWritesSkippedCacheMiss': 0, + 'degradedFailedCapacityInKb': 0, + 'activeNormRebuildCapacityInKb': 0, + 'fglSparesInKb': 0, + 'snapCapacityInUseInKb': 0, + 'numOfMigratingVolumes': 0, + 'compressionRatio': 0.0, + 'rfcacheWriteMiss': 0, + 'primaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'migratingVtreeIds': [ + ], + 'numOfVtrees': 1, + 'userDataCapacityNoTrimInKb': 0, + 'rfacheReadHit': 0, + 'compressedDataCompressionRatio': 0.0, + 'rplUsedJournalCap': 0, + 'pendingMovingCapacityInKb': 0, + 'numOfSnapshots': 0, + 'pendingFwdRebuildCapacityInKb': 0, + 'tempCapacityInKb': 0, + 'totalFglMigrationSizeInKb': 0, + 'normRebuildCapacityInKb': 0, + 'logWrittenBlocksInKb': 0, + 'primaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfThickBaseVolumes': 0, + 'enterProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeRebalanceCapacityInKb': 0, + 'numOfReplicationJournalVolumes': 0, + 'rfcacheReadsSkippedLockIos': 0, + 'unreachableUnusedCapacityInKb': 0, + 'netProvisionedAddressesInKb': 0, + 'trimmedUserDataCapacityInKb': 0, + 'provisionedAddressesInKb': 0, + 'numOfVolumesInDeletion': 0, + 'pendingMovingOutFwdRebuildJobs': 0, + 'maxCapacityInKb': 845783040, + 'rmPendingThickInKb': 0, + 'protectedCapacityInKb': 0, + 'secondaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'normRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinCapacityAllocatedInKb': 16777216, + 
'netFglUserDataCapacityInKb': 0, + 'metadataOverheadInKb': 0, + 'thinCapacityAllocatedInKm': 16777216, + 'rebalanceWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'primaryVacInKb': 8388608, + 'deviceIds': [ + 'dv_id_1', + 'dv_id_2', + 'dv_id_3' + ], + 'netSnapshotCapacityInKb': 0, + 'secondaryVacInKb': 8388608, + 'numOfDevices': 3, + 'rplTotalJournalCap': 0, + 'failedCapacityInKb': 0, + 'netMetadataOverheadInKb': 0, + 'activeMovingOutBckRebuildJobs': 0, + 'rfcacheReadsFromCache': 0, + 'activeMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'enterProtectedMaintenanceModeCapacityInKb': 0, + 'pendingMovingInNormRebuildJobs': 0, + 'failedVacInKb': 0, + 'primaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'fglUncompressedDataSizeInKb': 0, + 'fglCompressedDataSizeInKb': 0, + 'pendingRebalanceCapacityInKb': 0, + 'rfcacheAvgReadTime': 0, + 'semiProtectedCapacityInKb': 0, + 'pendingMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'mgUserDdataCcapacityInKb': 0, + 'snapshotCapacityInKb': 0, + 'netMgUserDataCapacityInKb': 0, + 'fwdRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesReceived': 0, + 'netUnusedCapacityInKb': 380602368, + 'thinUserDataCapacityInKb': 0, + 'protectedVacInKb': 16777216, + 'activeMovingRebalanceJobs': 0, + 'bckRebuildCapacityInKb': 0, + 'activeMovingInFwdRebuildJobs': 0, + 'netTrimmedUserDataCapacityInKb': 0, + 'pendingMovingRebalanceJobs': 0, + 'numOfMarkedVolumesForReplication': 0, + 'degradedHealthyVacInKb': 0, + 'semiProtectedVacInKb': 0, + 'userDataReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingBckRebuildCapacityInKb': 0, + 'capacityLimitInKb': 845783040, + 'vtreeIds': [ + 'vtree_id_1' + ], + 'activeMovingCapacityInKb': 1, + 'targetWriteLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'pendingExitProtectedMaintenanceModeCapacityInKb': 1, + 
'rfcacheIosSkipped': 1, + 'userDataWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inMaintenanceVacInKb': 1, + 'exitProtectedMaintenanceModeReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'netFglSparesInKb': 1, + 'rfcacheReadsSkipped': 1, + 'activeExitProtectedMaintenanceModeCapacityInKb': 1, + 'activeMovingOutExitProtectedMaintenanceModeJobs': 1, + 'numOfUnmappedVolumes': 2, + 'tempCapacityVacInKb': 1, + 'volumeAddressSpaceInKb': 80000, + 'currentFglMigrationSizeInKb': 1, + 'rfcacheWritesSkippedMaxIoSize': 1, + 'netMaxUserDataCapacityInKb': 380600000, + 'numOfMigratingVtrees': 1, + 'atRestCapacityInKb': 1, + 'rfacheWriteHit': 1, + 'bckRebuildReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheSourceDeviceWrites': 1, + 'spareCapacityInKb': 84578000, + 'enterProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheIoErrors': 1, + 'inaccessibleCapacityInKb': 1, + 'normRebuildWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'capacityInUseInKb': 1, + 'rebalanceReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheReadsSkippedMaxIoSize': 1, + 'activeMovingInExitProtectedMaintenanceModeJobs': 1, + 'secondaryReadFromDevBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'secondaryReadBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheWritesSkippedStuckIo': 1, + 'secondaryReadFromRmcacheBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inMaintenanceCapacityInKb': 1, + 'exposedCapacityInKb': 1, + 'netFglCompressedDataSizeInKb': 1, + 'userDataSdcWriteLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'inUseVacInKb': 16777000, + 'fwdRebuildCapacityInKb': 1, + 'thickCapacityInUseInKb': 1, + 'backgroundScanReadErrorCount': 1, + 
'activeMovingInRebalanceJobs': 1, + 'migratingVolumeIds': [ + '1xxx' + ], + 'rfcacheWritesSkippedLowResources': 1, + 'capacityInUseNoOverheadInKb': 1, + 'exitProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheSkippedUnlinedWrite': 1, + 'netCapacityInUseInKb': 1, + 'numOfOutgoingMigrations': 1, + 'rfcacheAvgWriteTime': 1, + 'pendingNormRebuildCapacityInKb': 1, + 'pendingMovingOutNormrebuildJobs': 1, + 'rfcacheSourceDeviceReads': 1, + 'rfcacheReadsPending': 1, + 'volumeAllocationLimitInKb': 3791650000, + 'rfcacheReadsSkippedHeavyLoad': 1, + 'fwdRebuildWriteBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'rfcacheReadMiss': 1, + 'targetReadLatency': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'userDataCapacityInKb': 1, + 'activeMovingInBckRebuildJobs': 1, + 'movingCapacityInKb': 1, + 'activeEnterProtectedMaintenanceModeCapacityInKb': 1, + 'backgroundScanCompareErrorCount': 1, + 'pendingMovingInFwdRebuildJobs': 1, + 'rfcacheReadsReceived': 1, + 'spSdsIds': [ + 'sp_id_1', + 'sp_id_2', + 'sp_id_3' + ], + 'pendingEnterProtectedMaintenanceModeCapacityInKb': 1, + 'vtreeAddresSpaceInKb': 8388000, + 'snapCapacityInUseOccupiedInKb': 1, + 'activeFwdRebuildCapacityInKb': 1, + 'rfcacheReadsSkippedStuckIo': 1, + 'activeMovingOutNormRebuildJobs': 1, + 'rfcacheWritePending': 1, + 'numOfThinBaseVolumes': 2, + 'degradedFailedVacInKb': 1, + 'userDataTrimBwc': { + 'numSeconds': 1, + 'totalWeightInKb': 1, + 'numOccured': 1 + }, + 'numOfIncomingVtreeMigrations': 1 + } + + @staticmethod + def get_exception_response(response_type): + if response_type == 'get_details': + return "Failed to get the storage pool test_pool with error " diff --git a/tests/unit/plugins/module_utils/mock_volume_api.py b/tests/unit/plugins/module_utils/mock_volume_api.py new file mode 100644 index 0000000..1ac4f84 --- /dev/null +++ b/tests/unit/plugins/module_utils/mock_volume_api.py @@ -0,0 +1,548 @@ 
+# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +""" +Mock Api response for Unit tests of volume module on Dell Technologies (Dell) PowerFlex +""" + +from __future__ import (absolute_import, division, print_function) +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi + +__metaclass__ = type + + +class MockVolumeApi: + VOLUME_COMMON_ARGS = { + "gateway_host": "**.***.**.***", + "vol_name": None, + "vol_id": None, + "vol_type": None, + "compression_type": None, + "storage_pool_name": None, + "storage_pool_id": None, + "protection_domain_name": None, + "protection_domain_id": None, + "snapshot_policy_name": None, + "snapshot_policy_id": None, + "auto_snap_remove_type": None, + "use_rmcache": None, + "size": None, + "cap_unit": None, + "vol_new_name": None, + "sdc": {}, + "sdc_state": None, + "delete_snapshots": None, + "state": None + } + + VOLUME_GET_LIST = [ + { + 'storagePoolId': 'test_pool_id_1', + 'dataLayout': 'MediumGranularity', + 'vtreeId': 'vtree_id_1', + 'sizeInKb': 8388608, + 'snplIdOfAutoSnapshot': None, + 'volumeType': 'ThinProvisioned', + 'consistencyGroupId': None, + 'ancestorVolumeId': None, + 'notGenuineSnapshot': False, + 'accessModeLimit': 'ReadWrite', + 'secureSnapshotExpTime': 0, + 'useRmcache': False, + 'managedBy': 'ScaleIO', + 'lockedAutoSnapshot': False, + 'lockedAutoSnapshotMarkedForRemoval': False, + 'autoSnapshotGroupId': None, + 'compressionMethod': 'Invalid', + 'pairIds': None, + 'timeStampIsAccurate': False, + 'mappedSdcInfo': None, + 'originalExpiryTime': 0, + 'retentionLevels': [ + ], + 'snplIdOfSourceVolume': None, + 'volumeReplicationState': 'UnmarkedForReplication', + 'replicationJournalVolume': False, + 'replicationTimeStamp': 0, + 'creationTime': 1655878090, + 'name': 'testing', + 'id': 'test_id_1' + } + ] + + VOLUME_STORAGEPOOL_DETAILS = 
MockStoragePoolApi.STORAGE_POOL_GET_LIST[0] + + VOLUME_PD_DETAILS = { + 'rebalanceNetworkThrottlingEnabled': False, + 'vtreeMigrationNetworkThrottlingEnabled': False, + 'overallIoNetworkThrottlingEnabled': False, + 'rfcacheEnabled': True, + 'rfcacheAccpId': None, + 'rebuildNetworkThrottlingEnabled': False, + 'sdrSdsConnectivityInfo': { + 'clientServerConnStatus': 'CLIENT_SERVER_CONN_STATUS_ALL_CONNECTED', + 'disconnectedClientId': None, + 'disconnectedClientName': None, + 'disconnectedServerId': None, + 'disconnectedServerName': None, + 'disconnectedServerIp': None + }, + 'protectionDomainState': 'Active', + 'rebuildNetworkThrottlingInKbps': None, + 'rebalanceNetworkThrottlingInKbps': None, + 'overallIoNetworkThrottlingInKbps': None, + 'vtreeMigrationNetworkThrottlingInKbps': None, + 'sdsDecoupledCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdsConfigurationFailureCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'mdmSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'sdsSdsNetworkDisconnectionsCounterParameters': { + 'shortWindow': { + 'threshold': 300, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 500, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 700, + 'windowSizeInSec': 86400 + } + }, + 'rfcacheOpertionalMode': 'WriteMiss', + 'rfcachePageSizeKb': 64, + 'rfcacheMaxIoSizeKb': 128, + 'sdsReceiveBufferAllocationFailuresCounterParameters': { + 
'shortWindow': { + 'threshold': 20000, + 'windowSizeInSec': 60 + }, + 'mediumWindow': { + 'threshold': 200000, + 'windowSizeInSec': 3600 + }, + 'longWindow': { + 'threshold': 2000000, + 'windowSizeInSec': 86400 + } + }, + 'fglDefaultNumConcurrentWrites': 1000, + 'fglMetadataCacheEnabled': False, + 'fglDefaultMetadataCacheSize': 0, + 'protectedMaintenanceModeNetworkThrottlingEnabled': False, + 'protectedMaintenanceModeNetworkThrottlingInKbps': None, + 'rplCapAlertLevel': 'normal', + 'systemId': 'syst_id_1', + 'name': 'domain1', + 'id': '4eeb304600000000', + } + + VOLUME_STATISTICS = { + 'backgroundScanFixedReadErrorCount': 0, + 'pendingMovingOutBckRebuildJobs': 0, + 'degradedHealthyCapacityInKb': 0, + 'activeMovingOutFwdRebuildJobs': 0, + 'bckRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglUncompressedDataSizeInKb': 0, + 'primaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'BackgroundScannedInMB': 3209584, + 'volumeIds': [ + '456ad22e00000003' + ], + 'maxUserDataCapacityInKb': 761204736, + 'persistentChecksumBuilderProgress': 100.0, + 'rfcacheReadsSkippedAlignedSizeTooLarge': 0, + 'pendingMovingInRebalanceJobs': 0, + 'rfcacheWritesSkippedHeavyLoad': 0, + 'unusedCapacityInKb': 761204736, + 'userDataSdcReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'totalReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfDeviceAtFaultRebuilds': 0, + 'totalWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'persistentChecksumCapacityInKb': 414720, + 'rmPendingAllocatedInKb': 0, + 'numOfVolumes': 1, + 'rfcacheIosOutstanding': 0, + 'capacityAvailableForVolumeAllocationInKb': 377487360, + 'numOfMappedToAllVolumes': 0, + 'netThinUserDataCapacityInKb': 0, + 'backgroundScanFixedCompareErrorCount': 0, + 'volMigrationWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 
'thinAndSnapshotRatio': 'Infinity', + 'fglUserDataCapacityInKb': 0, + 'pendingMovingInEnterProtectedMaintenanceModeJobs': 0, + 'activeMovingInNormRebuildJobs': 0, + 'aggregateCompressionLevel': 'Uncompressed', + 'targetOtherLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netUserDataCapacityInKb': 0, + 'pendingMovingOutExitProtectedMaintenanceModeJobs': 0, + 'overallUsageRatio': 'Infinity', + 'volMigrationReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netCapacityInUseNoOverheadInKb': 0, + 'pendingMovingInBckRebuildJobs': 0, + 'rfcacheReadsSkippedInternalError': 0, + 'activeBckRebuildCapacityInKb': 0, + 'rebalanceCapacityInKb': 0, + 'pendingMovingInExitProtectedMaintenanceModeJobs': 0, + 'rfcacheReadsSkippedLowResources': 0, + 'rplJournalCapAllowed': 0, + 'thinCapacityInUseInKb': 0, + 'userDataSdcTrimLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeMovingInEnterProtectedMaintenanceModeJobs': 0, + 'rfcacheWritesSkippedInternalError': 0, + 'netUserDataCapacityNoTrimInKb': 0, + 'rfcacheWritesSkippedCacheMiss': 0, + 'degradedFailedCapacityInKb': 0, + 'activeNormRebuildCapacityInKb': 0, + 'fglSparesInKb': 0, + 'snapCapacityInUseInKb': 0, + 'numOfMigratingVolumes': 0, + 'compressionRatio': 0.0, + 'rfcacheWriteMiss': 0, + 'primaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'migratingVtreeIds': [ + ], + 'numOfVtrees': 1, + 'userDataCapacityNoTrimInKb': 0, + 'rfacheReadHit': 0, + 'compressedDataCompressionRatio': 0.0, + 'rplUsedJournalCap': 0, + 'pendingMovingCapacityInKb': 0, + 'numOfSnapshots': 0, + 'pendingFwdRebuildCapacityInKb': 0, + 'tempCapacityInKb': 0, + 'totalFglMigrationSizeInKb': 0, + 'normRebuildCapacityInKb': 0, + 'logWrittenBlocksInKb': 0, + 'primaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfThickBaseVolumes': 0, + 'enterProtectedMaintenanceModeReadBwc': { + 
'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'activeRebalanceCapacityInKb': 0, + 'numOfReplicationJournalVolumes': 0, + 'rfcacheReadsSkippedLockIos': 0, + 'unreachableUnusedCapacityInKb': 0, + 'netProvisionedAddressesInKb': 0, + 'trimmedUserDataCapacityInKb': 0, + 'provisionedAddressesInKb': 0, + 'numOfVolumesInDeletion': 0, + 'pendingMovingOutFwdRebuildJobs': 0, + 'maxCapacityInKb': 845783040, + 'rmPendingThickInKb': 0, + 'protectedCapacityInKb': 0, + 'secondaryWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'normRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'thinCapacityAllocatedInKb': 16777216, + 'netFglUserDataCapacityInKb': 0, + 'metadataOverheadInKb': 0, + 'thinCapacityAllocatedInKm': 16777216, + 'rebalanceWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'primaryVacInKb': 8388608, + 'deviceIds': [ + 'bbd7580800030001', + 'bbd4580a00040001', + 'bbd5580b00050001' + ], + 'netSnapshotCapacityInKb': 0, + 'secondaryVacInKb': 8388608, + 'numOfDevices': 3, + 'rplTotalJournalCap': 0, + 'failedCapacityInKb': 0, + 'netMetadataOverheadInKb': 0, + 'activeMovingOutBckRebuildJobs': 0, + 'rfcacheReadsFromCache': 0, + 'activeMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'enterProtectedMaintenanceModeCapacityInKb': 0, + 'pendingMovingInNormRebuildJobs': 0, + 'failedVacInKb': 0, + 'primaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'fglUncompressedDataSizeInKb': 0, + 'fglCompressedDataSizeInKb': 0, + 'pendingRebalanceCapacityInKb': 0, + 'rfcacheAvgReadTime': 0, + 'semiProtectedCapacityInKb': 0, + 'pendingMovingOutEnterProtectedMaintenanceModeJobs': 0, + 'mgUserDdataCcapacityInKb': 0, + 'snapshotCapacityInKb': 0, + 'netMgUserDataCapacityInKb': 0, + 'fwdRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesReceived': 0, + 'netUnusedCapacityInKb': 380602368, + 
'thinUserDataCapacityInKb': 0, + 'protectedVacInKb': 16777216, + 'activeMovingRebalanceJobs': 0, + 'bckRebuildCapacityInKb': 0, + 'activeMovingInFwdRebuildJobs': 0, + 'netTrimmedUserDataCapacityInKb': 0, + 'pendingMovingRebalanceJobs': 0, + 'numOfMarkedVolumesForReplication': 0, + 'degradedHealthyVacInKb': 0, + 'semiProtectedVacInKb': 0, + 'userDataReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingBckRebuildCapacityInKb': 0, + 'capacityLimitInKb': 845783040, + 'vtreeIds': [ + '32b13de900000003' + ], + 'activeMovingCapacityInKb': 0, + 'targetWriteLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'pendingExitProtectedMaintenanceModeCapacityInKb': 0, + 'rfcacheIosSkipped': 0, + 'userDataWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inMaintenanceVacInKb': 0, + 'exitProtectedMaintenanceModeReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'netFglSparesInKb': 0, + 'rfcacheReadsSkipped': 0, + 'activeExitProtectedMaintenanceModeCapacityInKb': 0, + 'activeMovingOutExitProtectedMaintenanceModeJobs': 0, + 'numOfUnmappedVolumes': 1, + 'tempCapacityVacInKb': 0, + 'volumeAddressSpaceInKb': 8388608, + 'currentFglMigrationSizeInKb': 0, + 'rfcacheWritesSkippedMaxIoSize': 0, + 'netMaxUserDataCapacityInKb': 380602368, + 'numOfMigratingVtrees': 0, + 'atRestCapacityInKb': 0, + 'rfacheWriteHit': 0, + 'bckRebuildReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheSourceDeviceWrites': 0, + 'spareCapacityInKb': 84578304, + 'enterProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheIoErrors': 0, + 'inaccessibleCapacityInKb': 0, + 'normRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'capacityInUseInKb': 0, + 'rebalanceReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 
'rfcacheReadsSkippedMaxIoSize': 0, + 'activeMovingInExitProtectedMaintenanceModeJobs': 0, + 'secondaryReadFromDevBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'secondaryReadBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheWritesSkippedStuckIo': 0, + 'secondaryReadFromRmcacheBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inMaintenanceCapacityInKb': 0, + 'exposedCapacityInKb': 0, + 'netFglCompressedDataSizeInKb': 0, + 'userDataSdcWriteLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'inUseVacInKb': 16777216, + 'fwdRebuildCapacityInKb': 0, + 'thickCapacityInUseInKb': 0, + 'backgroundScanReadErrorCount': 0, + 'activeMovingInRebalanceJobs': 0, + 'migratingVolumeIds': [ + ], + 'rfcacheWritesSkippedLowResources': 0, + 'capacityInUseNoOverheadInKb': 0, + 'exitProtectedMaintenanceModeWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheSkippedUnlinedWrite': 0, + 'netCapacityInUseInKb': 0, + 'numOfOutgoingMigrations': 0, + 'rfcacheAvgWriteTime': 0, + 'pendingNormRebuildCapacityInKb': 0, + 'pendingMovingOutNormrebuildJobs': 0, + 'rfcacheSourceDeviceReads': 0, + 'rfcacheReadsPending': 0, + 'volumeAllocationLimitInKb': 3791650816, + 'rfcacheReadsSkippedHeavyLoad': 0, + 'fwdRebuildWriteBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'rfcacheReadMiss': 0, + 'targetReadLatency': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'userDataCapacityInKb': 0, + 'activeMovingInBckRebuildJobs': 0, + 'movingCapacityInKb': 0, + 'activeEnterProtectedMaintenanceModeCapacityInKb': 0, + 'backgroundScanCompareErrorCount': 0, + 'pendingMovingInFwdRebuildJobs': 0, + 'rfcacheReadsReceived': 0, + 'spSdsIds': [ + 'abdfe71b00030001', + 'abdce71d00040001', + 'abdde71e00050001' + ], + 'pendingEnterProtectedMaintenanceModeCapacityInKb': 0, + 'vtreeAddresSpaceInKb': 8388608, + 
'snapCapacityInUseOccupiedInKb': 0, + 'activeFwdRebuildCapacityInKb': 0, + 'rfcacheReadsSkippedStuckIo': 0, + 'activeMovingOutNormRebuildJobs': 0, + 'rfcacheWritePending': 0, + 'numOfThinBaseVolumes': 1, + 'degradedFailedVacInKb': 0, + 'userDataTrimBwc': { + 'numSeconds': 0, + 'totalWeightInKb': 0, + 'numOccured': 0 + }, + 'numOfIncomingVtreeMigrations': 0 + } + + @staticmethod + def get_exception_response(response_type): + if response_type == 'get_details': + return "Failed to get the volume test_id_1 with error " diff --git a/tests/unit/plugins/modules/test_info.py b/tests/unit/plugins/modules/test_info.py new file mode 100644 index 0000000..3f32353 --- /dev/null +++ b/tests/unit/plugins/modules/test_info.py @@ -0,0 +1,109 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for info module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_info_api import MockInfoApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() +utils.pypowerflex_version_check = MagicMock(return_value={'dependency_present': True, 'error_message': ""}) + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.info import PowerFlexInfo + + +class TestPowerflexInfo(): + + get_module_args = 
MockInfoApi.INFO_COMMON_ARGS + + @pytest.fixture + def info_module_mock(self, mocker): + info_module_mock = PowerFlexInfo() + info_module_mock.module.check_mode = False + info_module_mock.powerflex_conn.system.api_version = MagicMock( + return_value=3.5 + ) + info_module_mock.powerflex_conn.system.get = MagicMock( + return_value=MockInfoApi.INFO_ARRAY_DETAILS + ) + return info_module_mock + + def test_get_volume_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['vol'] + }) + info_module_mock.module.params = self.get_module_args + volume_resp = MockInfoApi.INFO_VOLUME_GET_LIST + info_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + volume_stat_resp = MockInfoApi.INFO_VOLUME_STATISTICS + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes = MagicMock( + return_value=volume_stat_resp + ) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.volume.get.assert_called() + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes.assert_called() + + def test_get_volume_details_with_exception(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['vol'] + }) + info_module_mock.module.params = self.get_module_args + volume_resp = MockInfoApi.INFO_VOLUME_GET_LIST + info_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes = MagicMock( + side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('volume_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] + + def test_get_sp_details(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['storage_pool'] + }) + info_module_mock.module.params = self.get_module_args + sp_resp = MockInfoApi.INFO_STORAGE_POOL_GET_LIST + info_module_mock.powerflex_conn.storage_pool.get = MagicMock( + 
return_value=sp_resp + ) + sp_stat_resp = MockInfoApi.INFO_STORAGE_POOL_STATISTICS + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools = MagicMock( + return_value=sp_stat_resp + ) + info_module_mock.perform_module_operation() + info_module_mock.powerflex_conn.storage_pool.get.assert_called() + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools.assert_called() + + def test_get_sp_details_with_exception(self, info_module_mock): + self.get_module_args.update({ + "gather_subset": ['storage_pool'] + }) + info_module_mock.module.params = self.get_module_args + sp_resp = MockInfoApi.INFO_STORAGE_POOL_GET_LIST + info_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=sp_resp + ) + info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools = MagicMock( + side_effect=MockApiException + ) + info_module_mock.perform_module_operation() + assert MockInfoApi.get_exception_response('sp_get_details') in info_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/tests/unit/plugins/modules/test_storagepool.py b/tests/unit/plugins/modules/test_storagepool.py new file mode 100644 index 0000000..4384338 --- /dev/null +++ b/tests/unit/plugins/modules/test_storagepool.py @@ -0,0 +1,73 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for storage pool module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from 
ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() +utils.pypowerflex_version_check = MagicMock(return_value={'dependency_present': True, 'error_message': ""}) + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import PowerFlexStoragePool + + +class TestPowerflexStoragePool(): + + get_module_args = MockStoragePoolApi.STORAGE_POOL_COMMON_ARGS + + @pytest.fixture + def storagepool_module_mock(self, mocker): + storagepool_module_mock = PowerFlexStoragePool() + storagepool_module_mock.module.check_mode = False + return storagepool_module_mock + + def test_get_storagepool_details(self, storagepool_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool", + "state": "present" + }) + storagepool_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock( + return_value=storagepool_resp + ) + storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS + storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + return_value=storagepool_statistics_resp + ) + storagepool_module_mock.perform_module_operation() + storagepool_module_mock.powerflex_conn.storage_pool.get.assert_called() + storagepool_module_mock.powerflex_conn.storage_pool.get_statistics.assert_called() + + def test_get_storagepool_details_with_exception(self, storagepool_module_mock): + self.get_module_args.update({ + "storage_pool_name": "test_pool" + }) + storagepool_module_mock.module.params = self.get_module_args + storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST + storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock( + 
return_value=storagepool_resp + ) + storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock( + side_effect=MockApiException + ) + storagepool_module_mock.create_storage_pool = MagicMock(return_value=None) + storagepool_module_mock.perform_module_operation() + assert MockStoragePoolApi.get_exception_response('get_details') in storagepool_module_mock.module.fail_json.call_args[1]['msg'] diff --git a/tests/unit/plugins/modules/test_volume.py b/tests/unit/plugins/modules/test_volume.py new file mode 100644 index 0000000..d23532c --- /dev/null +++ b/tests/unit/plugins/modules/test_volume.py @@ -0,0 +1,82 @@ +# Copyright: (c) 2022, Dell Technologies + +# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt) + +"""Unit Tests for volume module on PowerFlex""" + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +import pytest +from mock.mock import MagicMock +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_volume_api import MockVolumeApi +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \ + import MockSDKResponse +from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \ + import MockApiException +from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \ + import utils + +utils.get_logger = MagicMock() +utils.get_powerflex_gateway_host_connection = MagicMock() +utils.PowerFlexClient = MagicMock() +utils.pypowerflex_version_check = MagicMock(return_value={'dependency_present': True, 'error_message': ""}) + +from ansible.module_utils import basic +basic.AnsibleModule = MagicMock() +from ansible_collections.dellemc.powerflex.plugins.modules.volume import PowerFlexVolume + + +class TestPowerflexVolume(): + + get_module_args = MockVolumeApi.VOLUME_COMMON_ARGS + + @pytest.fixture + def volume_module_mock(self, mocker): + 
volume_module_mock = PowerFlexVolume() + volume_module_mock.module.check_mode = False + return volume_module_mock + + def test_get_volume_details(self, volume_module_mock): + self.get_module_args.update({ + "vol_name": "testing", + "state": "present" + }) + volume_module_mock.module.params = self.get_module_args + volume_resp = MockVolumeApi.VOLUME_GET_LIST + volume_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + volume_sp_resp = MockVolumeApi.VOLUME_STORAGEPOOL_DETAILS + volume_module_mock.get_storage_pool = MagicMock( + return_value=volume_sp_resp + ) + volume_pd_resp = MockVolumeApi.VOLUME_PD_DETAILS + volume_module_mock.get_protection_domain = MagicMock( + return_value=volume_pd_resp + ) + volume_statistics_resp = MockVolumeApi.VOLUME_STATISTICS + volume_module_mock.powerflex_conn.volume.get_statistics = MagicMock( + return_value=volume_statistics_resp + ) + volume_module_mock.perform_module_operation() + volume_module_mock.powerflex_conn.volume.get.assert_called() + volume_module_mock.powerflex_conn.volume.get_statistics.assert_called() + + def test_get_volume_details_with_exception(self, volume_module_mock): + self.get_module_args.update({ + "vol_name": "testing", + "state": "present" + }) + volume_module_mock.module.params = self.get_module_args + volume_resp = MockVolumeApi.VOLUME_GET_LIST + volume_module_mock.powerflex_conn.volume.get = MagicMock( + return_value=volume_resp + ) + volume_module_mock.powerflex_conn.volume.get_statistics = MagicMock( + side_effect=MockApiException + ) + volume_module_mock.create_volume = MagicMock(return_value=None) + volume_module_mock.perform_module_operation() + assert MockVolumeApi.get_exception_response('get_details') in volume_module_mock.module.fail_json.call_args[1]['msg'] From b326b3e13eee3f07953ca7487d36a37426592b2f Mon Sep 17 00:00:00 2001 From: ananthu-kuttattu Date: Mon, 19 Sep 2022 16:35:38 +0530 Subject: [PATCH 2/2] Ansible modules for PowerFlex release version 1.4.0 --- 
tests/unit/plugins/modules/test_info.py | 1 - tests/unit/plugins/modules/test_mdm_cluster.py | 2 +- tests/unit/plugins/modules/test_storagepool.py | 1 - tests/unit/plugins/modules/test_volume.py | 1 - 4 files changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/unit/plugins/modules/test_info.py b/tests/unit/plugins/modules/test_info.py index 3f32353..6a8083b 100644 --- a/tests/unit/plugins/modules/test_info.py +++ b/tests/unit/plugins/modules/test_info.py @@ -21,7 +21,6 @@ utils.get_logger = MagicMock() utils.get_powerflex_gateway_host_connection = MagicMock() utils.PowerFlexClient = MagicMock() -utils.pypowerflex_version_check = MagicMock(return_value={'dependency_present': True, 'error_message': ""}) from ansible.module_utils import basic basic.AnsibleModule = MagicMock() diff --git a/tests/unit/plugins/modules/test_mdm_cluster.py b/tests/unit/plugins/modules/test_mdm_cluster.py index 1b8dcfc..f8f3cdc 100644 --- a/tests/unit/plugins/modules/test_mdm_cluster.py +++ b/tests/unit/plugins/modules/test_mdm_cluster.py @@ -26,7 +26,7 @@ from ansible_collections.dellemc.powerflex.plugins.modules.mdm_cluster import PowerFlexMdmCluster -class TestPowerflexProtectionDomain(): +class TestPowerflexMDMCluster(): get_module_args = MockMdmClusterApi.MDM_CLUSTER_COMMON_ARGS add_mdm_ip = "xx.3x.xx.xx" diff --git a/tests/unit/plugins/modules/test_storagepool.py b/tests/unit/plugins/modules/test_storagepool.py index 4384338..a2c463f 100644 --- a/tests/unit/plugins/modules/test_storagepool.py +++ b/tests/unit/plugins/modules/test_storagepool.py @@ -21,7 +21,6 @@ utils.get_logger = MagicMock() utils.get_powerflex_gateway_host_connection = MagicMock() utils.PowerFlexClient = MagicMock() -utils.pypowerflex_version_check = MagicMock(return_value={'dependency_present': True, 'error_message': ""}) from ansible.module_utils import basic basic.AnsibleModule = MagicMock() diff --git a/tests/unit/plugins/modules/test_volume.py b/tests/unit/plugins/modules/test_volume.py index 
d23532c..53cdcfc 100644 --- a/tests/unit/plugins/modules/test_volume.py +++ b/tests/unit/plugins/modules/test_volume.py @@ -21,7 +21,6 @@ utils.get_logger = MagicMock() utils.get_powerflex_gateway_host_connection = MagicMock() utils.PowerFlexClient = MagicMock() -utils.pypowerflex_version_check = MagicMock(return_value={'dependency_present': True, 'error_message': ""}) from ansible.module_utils import basic basic.AnsibleModule = MagicMock()